langtrace-python-sdk 1.3.3__py3-none-any.whl → 1.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
examples/openai/async_tool_calling_nonstreaming.py (new file)
@@ -0,0 +1,93 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import AsyncOpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = AsyncOpenAI()
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ async def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = await client.chat.completions.create(
+         model="gpt-3.5-turbo-0125",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto",  # auto is default, but we'll be explicit
+     )
+     # print(response)
+     response_message = response.choices[0].message
+     tool_calls = response_message.tool_calls
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+         }  # only one function in this example, but you can have multiple
+         messages.append(response_message)  # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call.function.name
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call.function.arguments)
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             messages.append(
+                 {
+                     "tool_call_id": tool_call.id,
+                     "role": "tool",
+                     "name": function_name,
+                     "content": function_response,
+                 }
+             )  # extend conversation with function response
+         second_response = await client.chat.completions.create(
+             model="gpt-3.5-turbo-0125",
+             messages=messages,
+         )  # get a new response from the model where it can see the function response
+         # print(second_response)
+         return second_response
+
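Note: the new file above only defines run_conversation(); it never invokes it. A minimal driver, assuming the standard library's asyncio (the snippet below is illustrative and not part of the package):

import asyncio

# Hypothetical driver for the example above; not part of the released file.
# asyncio.run opens an event loop, awaits run_conversation, and closes the loop.
final = asyncio.run(run_conversation())
if final is not None:  # run_conversation returns None when no tool was called
    print(final.choices[0].message.content)
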
examples/openai/async_tool_calling_streaming.py (new file)
@@ -0,0 +1,167 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import AsyncOpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = AsyncOpenAI()
+
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def get_current_time(location):
+     """Get the current time in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "time": "10"})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "time": "72"})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "time": "22"})
+     else:
+         return json.dumps({"location": location, "time": "unknown"})
+
+
+ async def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         },
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_time",
+                 "description": "Get the current time in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = await client.chat.completions.create(
+         model="gpt-4",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto",  # auto is default, but we'll be explicit
+         stream=True,
+     )
+
+     # For streaming, uncomment the following lines
+     tool_call_dict = {}
+     tool_calls = []
+     id = ""
+     name = ""
+     arguments = ""
+     async for chunk in response:
+         if chunk.choices[0].delta is not None and chunk.choices[0].delta.tool_calls is not None:
+             for choice in chunk.choices:
+                 for tool_call in choice.delta.tool_calls:
+                     if tool_call.id and id != tool_call.id:
+                         id = tool_call.id if tool_call.id else ""
+                         name = tool_call.function.name if tool_call.function and tool_call.function.name else ""
+                         tool_call_dict[name] = {
+                             "id": id,
+                             "function": {
+                                 "name": name,
+                                 "arguments": arguments
+                             },
+                             "type": "function"
+                         }
+                     arguments += tool_call.function.arguments if tool_call.function and tool_call.function.arguments else ""
+     if name != "":
+         tool_call_dict[name] = {
+             "id": id,
+             "function": {
+                 "name": name,
+                 "arguments": arguments
+             },
+             "type": "function"
+         }
+     for key, value in tool_call_dict.items():
+         tool_calls.append(value)
+
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+             "get_current_time": get_current_time,
+         }  # only one function in this example, but you can have multiple
+         # messages.append(response_message)  # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call['function']['arguments'])
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             func_res = json.loads(function_response)
+             content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
+             messages.append(
+                 {
+                     "role": "system",
+                     "content": content
+                 }
+             )  # extend conversation with function response
+         print(messages)
+         second_response = await client.chat.completions.create(
+             model="gpt-4",
+             messages=messages,
+             stream=True,
+         )  # get a new response from the model where it can see the function response
+         result = []
+         async for chunk in second_response:
+             if chunk.choices[0].delta.content is not None:
+                 content = [
+                     choice.delta.content if choice.delta and
+                     choice.delta.content else ""
+                     for choice in chunk.choices]
+                 result.append(
+                     content[0] if len(content) > 0 else "")
+         print("".join(result))
+         # return second_response
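The streaming variant above has to reassemble tool calls by hand: in the streamed chat-completions protocol, only the first chunk of a tool call carries its id and function name, while the JSON argument string arrives split across later chunks. A stripped-down sketch of that reassembly, using hypothetical plain-dict fragments in place of the SDK's typed delta objects:

# Sketch of the delta-reassembly idea with plain dicts (hypothetical chunk
# shapes; the real SDK yields typed delta objects, not dicts).
def assemble_tool_call(fragments):
    call = {"id": "", "type": "function", "function": {"name": "", "arguments": ""}}
    for frag in fragments:
        if frag.get("id"):  # only the first fragment carries the id and name
            call["id"] = frag["id"]
            call["function"]["name"] = frag.get("name", "")
        # the argument JSON arrives in pieces and is concatenated in order
        call["function"]["arguments"] += frag.get("arguments", "")
    return call

fragments = [
    {"id": "call_1", "name": "get_current_weather", "arguments": '{"loc'},
    {"arguments": 'ation": "Paris"}'},
]
assert assemble_tool_call(fragments)["function"]["arguments"] == '{"location": "Paris"}'
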
examples/openai/chat_completion.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk.utils.with_root_span import (
  
  _ = load_dotenv(find_dotenv())
  
- langtrace.init(write_to_langtrace_cloud=True)
+ langtrace.init(write_to_langtrace_cloud=False)
  client = OpenAI()
  
  
@@ -26,7 +26,7 @@ def api2():
      response = client.chat.completions.create(
          model="gpt-4",
          messages=[{"role": "user", "content": "Say this is a test three times"}],
-         stream=False,
+         stream=True,
      )
      return response
  
@@ -35,25 +35,24 @@ def api2():
  def chat_completion():
      response = api1()
      response = api2()
-     return response
+     result = []
+     for chunk in response:
+         if chunk.choices[0].delta.content is not None:
+             content = [
+                 choice.delta.content if choice.delta and
+                 choice.delta.content else ""
+                 for choice in chunk.choices]
+             result.append(
+                 content[0] if len(content) > 0 else "")
  
+     print("".join(result))
+     # return response
  
-     # print(response)
+
+     # # print(response)
      # stream = client.chat.completions.create(
      #     model="gpt-4",
      #     messages=[{"role": "user", "content": "Say this is a test three times"}, {"role": "assistant", "content": "This is a test. This is a test. This is a test"},
      #               {"role": "user", "content": "Say this is a mock 4 times"}],
      #     stream=False,
      # )
-
-     # result = []
-     # for chunk in response:
-     #     if chunk.choices[0].delta.content is not None:
-     #         content = [
-     #             choice.delta.content if choice.delta and
-     #             choice.delta.content else ""
-     #             for choice in chunk.choices]
-     #         result.append(
-     #             content[0] if len(content) > 0 else "")
-
-     # print("".join(result))
examples/openai/function_calling.py
@@ -34,7 +34,7 @@ student_custom_functions = [
  ]
  
  
- @with_langtrace_root_span()
+ # @with_langtrace_root_span()
  def function_calling():
      response = client.chat.completions.create(
          model="gpt-3.5-turbo",
@@ -46,21 +46,21 @@ def function_calling():
          ],
          functions=student_custom_functions,
          function_call="auto",
-         stream=False,
+         stream=True,
      )
  
-     # result = []
-     # for chunk in response:
-     #     if chunk.choices[0].delta.function_call is not None:
-     #         content = [
-     #             choice.delta.function_call.arguments if choice.delta.function_call and
-     #             choice.delta.function_call.arguments else ""
-     #             for choice in chunk.choices]
-     #         result.append(
-     #             content[0] if len(content) > 0 else "")
+     result = []
+     for chunk in response:
+         if chunk.choices[0].delta.function_call is not None:
+             content = [
+                 choice.delta.function_call.arguments if choice.delta.function_call and
+                 choice.delta.function_call.arguments else ""
+                 for choice in chunk.choices]
+             result.append(
+                 content[0] if len(content) > 0 else "")
  
-     # print("".join(result))
+     print("".join(result))
  
      # Loading the response as a JSON object
-     json_response = json.loads(response.choices[0].message.function_call.arguments)
-     print(json_response)
+     # json_response = json.loads(response.choices[0].message.function_call.arguments)
+     # print(json_response)
examples/openai/tool_calling_nonstreaming.py (new file)
@@ -0,0 +1,92 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = OpenAI()
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = client.chat.completions.create(
+         model="gpt-3.5-turbo-0125",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto",  # auto is default, but we'll be explicit
+     )
+     # print(response)
+     response_message = response.choices[0].message
+     tool_calls = response_message.tool_calls
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+         }  # only one function in this example, but you can have multiple
+         messages.append(response_message)  # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call.function.name
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call.function.arguments)
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             messages.append(
+                 {
+                     "tool_call_id": tool_call.id,
+                     "role": "tool",
+                     "name": function_name,
+                     "content": function_response,
+                 }
+             )  # extend conversation with function response
+         second_response = client.chat.completions.create(
+             model="gpt-3.5-turbo-0125",
+             messages=messages,
+         )  # get a new response from the model where it can see the function response
+         # print(second_response)
+         return second_response
examples/openai/tool_calling_streaming.py (new file)
@@ -0,0 +1,167 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = OpenAI()
+
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def get_current_time(location):
+     """Get the current time in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "time": "10"})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "time": "72"})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "time": "22"})
+     else:
+         return json.dumps({"location": location, "time": "unknown"})
+
+
+ def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         },
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_time",
+                 "description": "Get the current time in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto",  # auto is default, but we'll be explicit
+         stream=True,
+     )
+
+     # For streaming, uncomment the following lines
+     tool_call_dict = {}
+     tool_calls = []
+     id = ""
+     name = ""
+     arguments = ""
+     for chunk in response:
+         if chunk.choices[0].delta is not None and chunk.choices[0].delta.tool_calls is not None:
+             for choice in chunk.choices:
+                 for tool_call in choice.delta.tool_calls:
+                     if tool_call.id and id != tool_call.id:
+                         id = tool_call.id if tool_call.id else ""
+                         name = tool_call.function.name if tool_call.function and tool_call.function.name else ""
+                         tool_call_dict[name] = {
+                             "id": id,
+                             "function": {
+                                 "name": name,
+                                 "arguments": arguments
+                             },
+                             "type": "function"
+                         }
+                     arguments += tool_call.function.arguments if tool_call.function and tool_call.function.arguments else ""
+     if name != "":
+         tool_call_dict[name] = {
+             "id": id,
+             "function": {
+                 "name": name,
+                 "arguments": arguments
+             },
+             "type": "function"
+         }
+     for key, value in tool_call_dict.items():
+         tool_calls.append(value)
+
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+             "get_current_time": get_current_time,
+         }  # only one function in this example, but you can have multiple
+         # messages.append(response_message)  # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call['function']['arguments'])
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             func_res = json.loads(function_response)
+             content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
+             messages.append(
+                 {
+                     "role": "system",
+                     "content": content
+                 }
+             )  # extend conversation with function response
+         print(messages)
+         second_response = client.chat.completions.create(
+             model="gpt-4",
+             messages=messages,
+             stream=True,
+         )  # get a new response from the model where it can see the function response
+         result = []
+         for chunk in second_response:
+             if chunk.choices[0].delta.content is not None:
+                 content = [
+                     choice.delta.content if choice.delta and
+                     choice.delta.content else ""
+                     for choice in chunk.choices]
+                 result.append(
+                     content[0] if len(content) > 0 else "")
+         print("".join(result))
+         # return second_response
langtrace_python_sdk/instrumentation/openai/patch.py
@@ -75,12 +75,12 @@ def images_generate(original_method, version, tracer):
  
              span.set_status(StatusCode.OK)
              return result
-         except Exception as e:
+         except Exception as err:
              # Record the exception in the span
-             span.record_exception(e)
+             span.record_exception(err)
  
              # Set the span status to indicate an error
-             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.set_status(Status(StatusCode.ERROR, str(err)))
  
              # Reraise the exception to ensure it's not swallowed
              raise
@@ -147,12 +147,12 @@ def async_images_generate(original_method, version, tracer):
  
              span.set_status(StatusCode.OK)
              return result
-         except Exception as e:
+         except Exception as err:
              # Record the exception in the span
-             span.record_exception(e)
+             span.record_exception(err)
  
              # Set the span status to indicate an error
-             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.set_status(Status(StatusCode.ERROR, str(err)))
  
              # Reraise the exception to ensure it's not swallowed
              raise
@@ -181,9 +181,9 @@ def chat_completions_create(original_method, version, tracer):
          # handle tool calls in the kwargs
          llm_prompts = []
          for item in kwargs.get("messages", []):
-             if "tool_calls" in item:
+             if hasattr(item, "tool_calls") and item.tool_calls is not None:
                  tool_calls = []
-                 for tool_call in item["tool_calls"]:
+                 for tool_call in item.tool_calls:
                      tool_call_dict = {
                          "id": tool_call.id if hasattr(tool_call, "id") else "",
                          "type": tool_call.type if hasattr(tool_call, "type") else "",
@@ -202,8 +202,9 @@ def chat_completions_create(original_method, version, tracer):
                          ),
                      }
                      tool_calls.append(tool_call_dict)
-                 item["tool_calls"] = tool_calls
-                 llm_prompts.append(item)
+                 llm_prompts.append(tool_calls)
+             else:
+                 llm_prompts.append(item)
  
          span_attributes = {
              "langtrace.sdk.name": "langtrace-python-sdk",
@@ -213,13 +214,14 @@ def chat_completions_create(original_method, version, tracer):
              "langtrace.version": "1.0.0",
              "url.full": base_url,
              "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
-             "llm.prompts": json.dumps(kwargs.get("messages", [])),
+             "llm.prompts": json.dumps(llm_prompts),
              "llm.stream": kwargs.get("stream"),
              **(extra_attributes if extra_attributes is not None else {}),
          }
  
          attributes = LLMSpanAttributes(**span_attributes)
  
+         tools = []
          if kwargs.get("temperature") is not None:
              attributes.llm_temperature = kwargs.get("temperature")
          if kwargs.get("top_p") is not None:
@@ -227,7 +229,11 @@ def chat_completions_create(original_method, version, tracer):
          if kwargs.get("user") is not None:
              attributes.llm_user = kwargs.get("user")
          if kwargs.get("functions") is not None:
-             attributes.llm_function_prompts = json.dumps(kwargs.get("functions"))
+             tools.append(json.dumps(kwargs.get("functions")))
+         if kwargs.get("tools") is not None:
+             tools.append(json.dumps(kwargs.get("tools")))
+         if len(tools) > 0:
+             attributes.llm_tools = json.dumps(tools)
  
          # TODO(Karthik): Gotta figure out how to handle streaming with context
          # with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"],
@@ -252,16 +258,7 @@ def chat_completions_create(original_method, version, tracer):
                          if choice.message and choice.message.role
                          else "assistant"
                      ),
-                     "content": (
-                         choice.message.content
-                         if choice.message and choice.message.content
-                         else (
-                             choice.message.function_call.arguments
-                             if choice.message
-                             and choice.message.function_call.arguments
-                             else ""
-                         )
-                     ),
+                     "content": extract_content(choice),
                      **(
                          {
                              "content_filter_results": choice[
@@ -319,6 +316,7 @@ def chat_completions_create(original_method, version, tracer):
                      span,
                      prompt_tokens,
                      function_call=kwargs.get("functions") is not None,
+                     tool_calls=kwargs.get("tools") is not None,
                  )
  
          except Exception as error:
@@ -327,7 +325,7 @@ def chat_completions_create(original_method, version, tracer):
              span.end()
              raise
  
-     def handle_streaming_response(result, span, prompt_tokens, function_call=False):
+     def handle_streaming_response(result, span, prompt_tokens, function_call=False, tool_calls=False):
          """Process and yield streaming response chunks."""
          result_content = []
          span.add_event(Event.STREAM_START.value)
@@ -337,37 +335,29 @@ def chat_completions_create(original_method, version, tracer):
              if hasattr(chunk, "model") and chunk.model is not None:
                  span.set_attribute("llm.model", chunk.model)
              if hasattr(chunk, "choices") and chunk.choices is not None:
-                 token_counts = [
-                     (
-                         estimate_tokens(choice.delta.content)
-                         if choice.delta and choice.delta.content
-                         else (
-                             estimate_tokens(choice.delta.function_call.arguments)
-                             if choice.delta.function_call
-                             and choice.delta.function_call.arguments
-                             else 0
-                         )
-                     )
-                     for choice in chunk.choices
-                 ]
-                 completion_tokens += sum(token_counts)
-                 content = [
-                     (
-                         choice.delta.content
-                         if choice.delta and choice.delta.content
-                         else (
-                             choice.delta.function_call.arguments
-                             if choice.delta.function_call
-                             and choice.delta.function_call.arguments
-                             else ""
-                         )
-                     )
-                     for choice in chunk.choices
-                 ]
+                 if not function_call and not tool_calls:
+                     for choice in chunk.choices:
+                         if choice.delta and choice.delta.content is not None:
+                             token_counts = estimate_tokens(choice.delta.content)
+                             completion_tokens += token_counts
+                             content = [choice.delta.content]
+                 elif function_call:
+                     for choice in chunk.choices:
+                         if choice.delta and choice.delta.function_call and choice.delta.function_call.arguments is not None:
+                             token_counts = estimate_tokens(choice.delta.function_call.arguments)
+                             completion_tokens += token_counts
+                             content = [
+                                 choice.delta.function_call.arguments
+                             ]
+                 elif tool_calls:
+                     # TODO(Karthik): Tool calls streaming is tricky. The chunks after the
+                     # first one are missing the function name and id though the arguments
+                     # are spread across the chunks.
+                     content = []
              else:
                  content = []
              span.add_event(
-                 Event.STREAM_OUTPUT.value, {"response": "".join(content)}
+                 Event.STREAM_OUTPUT.value, {"response": "".join(content) if len(content) > 0 and content[0] is not None else ""}
              )
              result_content.append(content[0] if len(content) > 0 else "")
              yield chunk
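The rewritten handler now branches on the stream type (plain content, legacy function_call, or tools) and accumulates completion_tokens chunk by chunk via estimate_tokens. A toy model of that accounting, with a stand-in estimator (the SDK's real estimate_tokens lives in langtrace_python_sdk/utils/llm.py; the heuristic below is only an assumption for illustration):

# Toy model of per-chunk completion-token accounting. estimate_tokens here is
# a stand-in heuristic, not the SDK's implementation.
def estimate_tokens(text):
    return max(1, len(text) // 4)  # rough "about 4 characters per token" guess

def count_stream(deltas):
    completion_tokens = 0
    for piece in deltas:  # each piece mimics one chunk's delta content
        if piece:  # skip empty deltas, as the patched code does
            completion_tokens += estimate_tokens(piece)
    return completion_tokens

print(count_stream(["Hello", ", ", "world!"]))  # prints 3 with this heuristic
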
@@ -422,6 +412,34 @@ def async_chat_completions_create(original_method, version, tracer):
  
          extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
  
+         # handle tool calls in the kwargs
+         llm_prompts = []
+         for item in kwargs.get("messages", []):
+             if hasattr(item, "tool_calls") and item.tool_calls is not None:
+                 tool_calls = []
+                 for tool_call in item.tool_calls:
+                     tool_call_dict = {
+                         "id": tool_call.id if hasattr(tool_call, "id") else "",
+                         "type": tool_call.type if hasattr(tool_call, "type") else "",
+                     }
+                     if hasattr(tool_call, "function"):
+                         tool_call_dict["function"] = {
+                             "name": (
+                                 tool_call.function.name
+                                 if hasattr(tool_call.function, "name")
+                                 else ""
+                             ),
+                             "arguments": (
+                                 tool_call.function.arguments
+                                 if hasattr(tool_call.function, "arguments")
+                                 else ""
+                             ),
+                         }
+                     tool_calls.append(tool_call_dict)
+                 llm_prompts.append(tool_calls)
+             else:
+                 llm_prompts.append(item)
+
          span_attributes = {
              "langtrace.sdk.name": "langtrace-python-sdk",
              "langtrace.service.name": service_provider,
@@ -430,13 +448,14 @@ def async_chat_completions_create(original_method, version, tracer):
              "langtrace.version": "1.0.0",
              "url.full": base_url,
              "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
-             "llm.prompts": json.dumps(kwargs.get("messages", [])),
+             "llm.prompts": json.dumps(llm_prompts),
              "llm.stream": kwargs.get("stream"),
              **(extra_attributes if extra_attributes is not None else {}),
          }
  
          attributes = LLMSpanAttributes(**span_attributes)
  
+         tools = []
          if kwargs.get("temperature") is not None:
              attributes.llm_temperature = kwargs.get("temperature")
          if kwargs.get("top_p") is not None:
@@ -444,7 +463,11 @@ def async_chat_completions_create(original_method, version, tracer):
          if kwargs.get("user") is not None:
              attributes.llm_user = kwargs.get("user")
          if kwargs.get("functions") is not None:
-             attributes.llm_function_prompts = json.dumps(kwargs.get("functions"))
+             tools.append(json.dumps(kwargs.get("functions")))
+         if kwargs.get("tools") is not None:
+             tools.append(json.dumps(kwargs.get("tools")))
+         if len(tools) > 0:
+             attributes.llm_tools = json.dumps(tools)
  
          # TODO(Karthik): Gotta figure out how to handle streaming with context
          # with tracer.start_as_current_span(APIS["CHAT_COMPLETION"]["METHOD"],
@@ -469,16 +492,7 @@ def async_chat_completions_create(original_method, version, tracer):
                          if choice.message and choice.message.role
                          else "assistant"
                      ),
-                     "content": (
-                         choice.message.content
-                         if choice.message and choice.message.content
-                         else (
-                             choice.message.function_call.arguments
-                             if choice.message
-                             and choice.message.function_call.arguments
-                             else ""
-                         )
-                     ),
+                     "content": extract_content(choice),
                      **(
                          {
                              "content_filter_results": choice[
@@ -536,6 +550,7 @@ def async_chat_completions_create(original_method, version, tracer):
                      span,
                      prompt_tokens,
                      function_call=kwargs.get("functions") is not None,
+                     tool_calls=kwargs.get("tools") is not None,
                  )
  
          except Exception as error:
@@ -544,9 +559,7 @@ def async_chat_completions_create(original_method, version, tracer):
              span.end()
              raise
  
-     async def ahandle_streaming_response(
-         result, span, prompt_tokens, function_call=False
-     ):
+     async def ahandle_streaming_response(result, span, prompt_tokens, function_call=False, tool_calls=False):
          """Process and yield streaming response chunks."""
          result_content = []
          span.add_event(Event.STREAM_START.value)
@@ -556,37 +569,29 @@ def async_chat_completions_create(original_method, version, tracer):
              if hasattr(chunk, "model") and chunk.model is not None:
                  span.set_attribute("llm.model", chunk.model)
              if hasattr(chunk, "choices") and chunk.choices is not None:
-                 token_counts = [
-                     (
-                         estimate_tokens(choice.delta.content)
-                         if choice.delta and choice.delta.content
-                         else (
-                             estimate_tokens(choice.delta.function_call.arguments)
-                             if choice.delta.function_call
-                             and choice.delta.function_call.arguments
-                             else 0
-                         )
-                     )
-                     for choice in chunk.choices
-                 ]
-                 completion_tokens += sum(token_counts)
-                 content = [
-                     (
-                         choice.delta.content
-                         if choice.delta and choice.delta.content
-                         else (
-                             choice.delta.function_call.arguments
-                             if choice.delta.function_call
-                             and choice.delta.function_call.arguments
-                             else ""
-                         )
-                     )
-                     for choice in chunk.choices
-                 ]
+                 if not function_call and not tool_calls:
+                     for choice in chunk.choices:
+                         if choice.delta and choice.delta.content is not None:
+                             token_counts = estimate_tokens(choice.delta.content)
+                             completion_tokens += token_counts
+                             content = [choice.delta.content]
+                 elif function_call:
+                     for choice in chunk.choices:
+                         if choice.delta and choice.delta.function_call and choice.delta.function_call.arguments is not None:
+                             token_counts = estimate_tokens(choice.delta.function_call.arguments)
+                             completion_tokens += token_counts
+                             content = [
+                                 choice.delta.function_call.arguments
+                             ]
+                 elif tool_calls:
+                     # TODO(Karthik): Tool calls streaming is tricky. The chunks after the
+                     # first one are missing the function name and id though the arguments
+                     # are spread across the chunks.
+                     content = []
              else:
                  content = []
              span.add_event(
-                 Event.STREAM_OUTPUT.value, {"response": "".join(content)}
+                 Event.STREAM_OUTPUT.value, {"response": "".join(content) if len(content) > 0 and content[0] is not None else ""}
              )
              result_content.append(content[0] if len(content) > 0 else "")
              yield chunk
@@ -673,12 +678,12 @@ def embeddings_create(original_method, version, tracer):
              result = wrapped(*args, **kwargs)
              span.set_status(StatusCode.OK)
              return result
-         except Exception as e:
+         except Exception as err:
              # Record the exception in the span
-             span.record_exception(e)
+             span.record_exception(err)
  
              # Set the span status to indicate an error
-             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.set_status(Status(StatusCode.ERROR, str(err)))
  
              # Reraise the exception to ensure it's not swallowed
              raise
@@ -736,14 +741,45 @@ def async_embeddings_create(original_method, version, tracer):
              result = await wrapped(*args, **kwargs)
              span.set_status(StatusCode.OK)
              return result
-         except Exception as e:
+         except Exception as err:
              # Record the exception in the span
-             span.record_exception(e)
+             span.record_exception(err)
  
              # Set the span status to indicate an error
-             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.set_status(Status(StatusCode.ERROR, str(err)))
  
              # Reraise the exception to ensure it's not swallowed
              raise
  
      return traced_method
+
+
+ def extract_content(choice):
+     # Check if choice.message exists and has a content attribute
+     if hasattr(choice, 'message') and hasattr(choice.message, 'content') and choice.message.content is not None:
+         return choice.message.content
+
+     # Check if choice.message has tool_calls and extract information accordingly
+     elif hasattr(choice, 'message') and hasattr(choice.message, 'tool_calls') and choice.message.tool_calls is not None:
+         result = [
+             {
+                 "id": tool_call.id,
+                 "type": tool_call.type,
+                 "function": {
+                     "name": tool_call.function.name,
+                     "arguments": tool_call.function.arguments,
+                 }
+             } for tool_call in choice.message.tool_calls
+         ]
+         return result
+
+     # Check if choice.message has a function_call and extract information accordingly
+     elif hasattr(choice, 'message') and hasattr(choice.message, 'function_call') and choice.message.function_call is not None:
+         return {
+             "name": choice.message.function_call.name,
+             "arguments": choice.message.function_call.arguments,
+         }
+
+     # Return an empty string if none of the above conditions are met
+     else:
+         return ""
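The new extract_content helper normalizes whichever field a choice actually carries: plain content, a list of tool calls, or a legacy function_call. A quick illustration with hypothetical stand-in objects (the real argument is an OpenAI SDK choice):

# Hypothetical stand-ins for an OpenAI SDK choice; for illustration only.
from types import SimpleNamespace

plain = SimpleNamespace(
    message=SimpleNamespace(content="Hi there", tool_calls=None, function_call=None)
)
print(extract_content(plain))  # -> "Hi there"

legacy = SimpleNamespace(
    message=SimpleNamespace(
        content=None,
        tool_calls=None,
        function_call=SimpleNamespace(
            name="get_current_weather", arguments='{"location": "Paris"}'
        ),
    )
)
print(extract_content(legacy))  # -> {"name": ..., "arguments": ...}
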
langtrace_python_sdk/version.py
@@ -1 +1 @@
- __version__ = "1.3.3"
+ __version__ = "1.3.5"
langtrace_python_sdk-1.3.3.dist-info/METADATA → langtrace_python_sdk-1.3.5.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: langtrace-python-sdk
- Version: 1.3.3
+ Version: 1.3.5
  Summary: Python SDK for LangTrace
  Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
  Author-email: Scale3 Labs <engineering@scale3labs.com>
@@ -15,7 +15,7 @@ Requires-Dist: opentelemetry-instrumentation
  Requires-Dist: opentelemetry-sdk
  Requires-Dist: pinecone-client
  Requires-Dist: tiktoken
- Requires-Dist: trace-attributes
+ Requires-Dist: trace-attributes==1.0.32
  Provides-Extra: dev
  Requires-Dist: anthropic; extra == 'dev'
  Requires-Dist: chromadb; extra == 'dev'
langtrace_python_sdk-1.3.3.dist-info/RECORD → langtrace_python_sdk-1.3.5.dist-info/RECORD
@@ -17,16 +17,20 @@ examples/llamaindex_example/agent.py,sha256=_iIXy9lfDz6ySf6aTeeRqejlfGnXZ7msxLBj
  examples/llamaindex_example/basic.py,sha256=gvns3oDUy0c4I5ewnj9-B36_1La8y6qD3VQaq6v3syM,654
  examples/llamaindex_example/data/abramov.txt,sha256=Ou-GyWZm5AjHLgxviBoRE9ikNv5MScsF0cd--0vVVhI,32667
  examples/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- examples/openai/chat_completion.py,sha256=1AiLIr0sWBKcqzhCbTJkspZyuGDZATxhbKBCKjzvM-E,1731
+ examples/openai/async_tool_calling_nonstreaming.py,sha256=foomPKwpju0PMazdE3xNrdZWguUYMnNXibqq01-uBvc,3806
+ examples/openai/async_tool_calling_streaming.py,sha256=TayOsb0jcYYlFpnWWUYxwCVKuJhTfGfE8LkjYJNGSz4,6900
+ examples/openai/chat_completion.py,sha256=B7djvaprhEhegKqQxAh95yhALIYVtanWid_w75uredg,1754
  examples/openai/embeddings_create.py,sha256=AhDNAqg-WzRYLJAE_b2RKGjuVCh4aZSU7MxcZv2kCHQ,518
- examples/openai/function_calling.py,sha256=qsbmlkWnOvdemekMqsB0roBhpn_GsNBWrR2td0vVEIs,2335
+ examples/openai/function_calling.py,sha256=6Nm1ZjP4iKx1Za7ch3zIciQ5zcXWBb2-mpYfIhPs8oo,2320
  examples/openai/images_generate.py,sha256=ZioxTuHKE_yYlhpESqXKVzdkiwdegkmLVB7N8T2LU00,506
+ examples/openai/tool_calling_nonstreaming.py,sha256=MxjUGD6Q2zg522E6kymGvXOikoL3qMoZf6pLQgws8zw,3776
+ examples/openai/tool_calling_streaming.py,sha256=WnWWlgDqKuqN2DtWbpJs_JvmmQehBZp0Ke3ZXvCJdQw,6860
  examples/perplexity_example/basic.py,sha256=oTLwEYlvpD4wEnqEUrUSlQ0SeQ0u50Jeab4ggkikQg0,671
  examples/pinecone_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  examples/pinecone_example/basic.py,sha256=OkYjN3J5kxw-kloOV3Q-iyI6opkbarWsMom-_AMP2ZA,893
  langtrace_python_sdk/__init__.py,sha256=SlHg447-nQBbw8exRNJP_OyHUZ39Sldb7aaQ35hIRm8,262
  langtrace_python_sdk/langtrace.py,sha256=83-AkdASO7UF9FHR9BDZUSeYv9GFZkJJQD2YLKbqzo8,3562
- langtrace_python_sdk/version.py,sha256=Vi6om3KImlKsS_Wg5CjUgYffoi2zx7T-SRPnnGL0G7M,22
+ langtrace_python_sdk/version.py,sha256=tdqvkGH0OryRjjXzO3HS5DyYol-VTO9fC8m43nB2PgI,22
  langtrace_python_sdk/constants/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=5MNjnAOg-4am78J3gVMH6FSwq5N8TOj72ugkhsw4vi0,46
  langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,14 +66,14 @@ langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py,sha256=D7_HPv
  langtrace_python_sdk/instrumentation/llamaindex/patch.py,sha256=8IM2dedF81w8_vVyA56JptyvlQl_bQO4UcB56sptuGs,3700
  langtrace_python_sdk/instrumentation/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langtrace_python_sdk/instrumentation/openai/instrumentation.py,sha256=Pv4n4z_kSxvZGVxrj3AopBoWQSxIOtMKolkxHrchRdM,2162
- langtrace_python_sdk/instrumentation/openai/patch.py,sha256=6hZ8ExTHHUnib8sU5k5OXt9zFKl3iTL2-PzsHJxW5ao,31587
+ langtrace_python_sdk/instrumentation/openai/patch.py,sha256=ZxR6hLXbf05rw7tccNsEQv-uB8Zb8QvOzTJk1coJbkY,33819
  langtrace_python_sdk/instrumentation/pinecone/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langtrace_python_sdk/instrumentation/pinecone/instrumentation.py,sha256=o0EUd5jvHaDKOUTj4NjnL5UfDHDHxyXkWGlTW4oeRDk,1784
  langtrace_python_sdk/instrumentation/pinecone/patch.py,sha256=5lF7hQmg2-U2EWtOC0w8_peRaNMysBomb0fjiNoS6eQ,2200
  langtrace_python_sdk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langtrace_python_sdk/utils/llm.py,sha256=4z2e-md_ELXCEuOIRVWracR6qH2pmsOxCqpkuF9_3Nw,1589
  langtrace_python_sdk/utils/with_root_span.py,sha256=N7ONrcF0myZbHBy5gpQffDbX-Kf63Crsz9szG0i3m08,1889
- langtrace_python_sdk-1.3.3.dist-info/METADATA,sha256=SQOqeK4DcO0lKolsayInA_t-pr3Stk26sH742xYFDT0,9086
- langtrace_python_sdk-1.3.3.dist-info/WHEEL,sha256=K0BPUNF1N3kQ9olb8aVEtkObePEjdr2JOLT1N83EVws,87
- langtrace_python_sdk-1.3.3.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- langtrace_python_sdk-1.3.3.dist-info/RECORD,,
+ langtrace_python_sdk-1.3.5.dist-info/METADATA,sha256=yUAeoRPdsjv_i4VUE0lwzH10O1uNWt9-S-qj8yMhRAI,9094
+ langtrace_python_sdk-1.3.5.dist-info/WHEEL,sha256=osohxoshIHTFJFVPhsi1UkZuLRGMHRXZzwEBW2ezjrc,87
+ langtrace_python_sdk-1.3.5.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ langtrace_python_sdk-1.3.5.dist-info/RECORD,,
langtrace_python_sdk-1.3.3.dist-info/WHEEL → langtrace_python_sdk-1.3.5.dist-info/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.24.0
+ Generator: hatchling 1.24.1
  Root-Is-Purelib: true
  Tag: py3-none-any