langtrace-python-sdk 1.3.4__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. examples/openai/async_tool_calling_nonstreaming.py +93 -0
  2. examples/openai/async_tool_calling_streaming.py +167 -0
  3. examples/openai/chat_completion.py +15 -16
  4. examples/openai/function_calling.py +14 -14
  5. examples/openai/tool_calling_nonstreaming.py +92 -0
  6. examples/openai/tool_calling_streaming.py +167 -0
  7. langtrace_python_sdk/instrumentation/openai/patch.py +175 -99
  8. langtrace_python_sdk/version.py +1 -1
  9. {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/METADATA +5 -1
  10. {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/RECORD +29 -8
  11. {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/WHEEL +1 -1
  12. tests/__init__.py +0 -0
  13. tests/anthropic/test_anthropic.py +73 -0
  14. tests/chroma/test_chroma.py +64 -0
  15. tests/langchain/test_langchain.py +69 -0
  16. tests/langchain/test_langchain_community.py +69 -0
  17. tests/langchain/test_langchain_core.py +115 -0
  18. tests/openai/cassettes/test_async_chat_completion_streaming.yaml +158 -0
  19. tests/openai/cassettes/test_async_image_generation.yaml +97 -0
  20. tests/openai/cassettes/test_chat_completion.yaml +101 -0
  21. tests/openai/cassettes/test_chat_completion_streaming.yaml +200860 -0
  22. tests/openai/cassettes/test_image_generation.yaml +97 -0
  23. tests/openai/conftest.py +45 -0
  24. tests/openai/test_chat_completion.py +142 -0
  25. tests/openai/test_embeddings.py +0 -0
  26. tests/openai/test_image_generation.py +77 -0
  27. tests/pinecone/test_pinecone.py +72 -0
  28. tests/utils.py +21 -0
  29. {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/licenses/LICENSE +0 -0
examples/openai/async_tool_calling_nonstreaming.py
@@ -0,0 +1,93 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import AsyncOpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = AsyncOpenAI()
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ async def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = await client.chat.completions.create(
+         model="gpt-3.5-turbo-0125",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto", # auto is default, but we'll be explicit
+     )
+     # print(response)
+     response_message = response.choices[0].message
+     tool_calls = response_message.tool_calls
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+         } # only one function in this example, but you can have multiple
+         messages.append(response_message) # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call.function.name
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call.function.arguments)
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             messages.append(
+                 {
+                     "tool_call_id": tool_call.id,
+                     "role": "tool",
+                     "name": function_name,
+                     "content": function_response,
+                 }
+             ) # extend conversation with function response
+         second_response = await client.chat.completions.create(
+             model="gpt-3.5-turbo-0125",
+             messages=messages,
+         ) # get a new response from the model where it can see the function response
+         # print(second_response)
+         return second_response
+
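Note: the new async examples only define run_conversation(); the diff does not add an entry point that awaits it. A minimal driver (a sketch, not part of the package) could look like:

    import asyncio

    # hypothetical runner for the example above; assumes OPENAI_API_KEY is
    # available via the .env file loaded by load_dotenv()
    if __name__ == "__main__":
        print(asyncio.run(run_conversation()))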
examples/openai/async_tool_calling_streaming.py
@@ -0,0 +1,167 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import AsyncOpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = AsyncOpenAI()
+
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def get_current_time(location):
+     """Get the current time in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "time": "10"})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "time": "72"})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "time": "22"})
+     else:
+         return json.dumps({"location": location, "time": "unknown"})
+
+
+ async def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         },
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_time",
+                 "description": "Get the current time in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = await client.chat.completions.create(
+         model="gpt-4",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto", # auto is default, but we'll be explicit
+         stream=True,
+     )
+
+     # For streaming, uncomment the following lines
+     tool_call_dict = {}
+     tool_calls = []
+     id = ""
+     name = ""
+     arguments = ""
+     async for chunk in response:
+         if chunk.choices[0].delta is not None and chunk.choices[0].delta.tool_calls is not None:
+             for choice in chunk.choices:
+                 for tool_call in choice.delta.tool_calls:
+                     if tool_call.id and id != tool_call.id:
+                         id = tool_call.id if tool_call.id else ""
+                         name = tool_call.function.name if tool_call.function and tool_call.function.name else ""
+                         tool_call_dict[name] = {
+                             "id": id,
+                             "function": {
+                                 "name": name,
+                                 "arguments": arguments
+                             },
+                             "type": "function"
+                         }
+                     arguments += tool_call.function.arguments if tool_call.function and tool_call.function.arguments else ""
+     if name != "":
+         tool_call_dict[name] = {
+             "id": id,
+             "function": {
+                 "name": name,
+                 "arguments": arguments
+             },
+             "type": "function"
+         }
+     for key, value in tool_call_dict.items():
+         tool_calls.append(value)
+
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+             "get_current_time": get_current_time,
+         } # only one function in this example, but you can have multiple
+         # messages.append(response_message) # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call['function']['arguments'])
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             func_res = json.loads(function_response)
+             content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
+             messages.append(
+                 {
+                     "role": "system",
+                     "content": content
+                 }
+             ) # extend conversation with function response
+         print(messages)
+         second_response = await client.chat.completions.create(
+             model="gpt-4",
+             messages=messages,
+             stream=True,
+         ) # get a new response from the model where it can see the function response
+         result = []
+         async for chunk in second_response:
+             if chunk.choices[0].delta.content is not None:
+                 content = [
+                     choice.delta.content if choice.delta and
+                     choice.delta.content else ""
+                     for choice in chunk.choices]
+                 result.append(
+                     content[0] if len(content) > 0 else "")
+         print("".join(result))
+         # return second_response
examples/openai/chat_completion.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk.utils.with_root_span import (

  _ = load_dotenv(find_dotenv())

- langtrace.init(write_to_langtrace_cloud=True)
+ langtrace.init(write_to_langtrace_cloud=False)
  client = OpenAI()


@@ -26,7 +26,7 @@ def api2():
      response = client.chat.completions.create(
          model="gpt-4",
          messages=[{"role": "user", "content": "Say this is a test three times"}],
-         stream=False,
+         stream=True,
      )
      return response

@@ -35,25 +35,24 @@ def api2():
  def chat_completion():
      response = api1()
      response = api2()
-     return response
+     result = []
+     for chunk in response:
+         if chunk.choices[0].delta.content is not None:
+             content = [
+                 choice.delta.content if choice.delta and
+                 choice.delta.content else ""
+                 for choice in chunk.choices]
+             result.append(
+                 content[0] if len(content) > 0 else "")

+     print("".join(result))
+     # return response

- # print(response)
+
+ # # print(response)
  # stream = client.chat.completions.create(
  # model="gpt-4",
  # messages=[{"role": "user", "content": "Say this is a test three times"}, {"role": "assistant", "content": "This is a test. This is a test. This is a test"},
  # {"role": "user", "content": "Say this is a mock 4 times"}],
  # stream=False,
  # )
-
- # result = []
- # for chunk in response:
- # if chunk.choices[0].delta.content is not None:
- # content = [
- # choice.delta.content if choice.delta and
- # choice.delta.content else ""
- # for choice in chunk.choices]
- # result.append(
- # content[0] if len(content) > 0 else "")
-
- # print("".join(result))
examples/openai/function_calling.py
@@ -34,7 +34,7 @@ student_custom_functions = [
  ]


- @with_langtrace_root_span()
+ # @with_langtrace_root_span()
  def function_calling():
      response = client.chat.completions.create(
          model="gpt-3.5-turbo",
@@ -46,21 +46,21 @@ def function_calling():
          ],
          functions=student_custom_functions,
          function_call="auto",
-         stream=False,
+         stream=True,
      )

-     # result = []
-     # for chunk in response:
-     # if chunk.choices[0].delta.function_call is not None:
-     # content = [
-     # choice.delta.function_call.arguments if choice.delta.function_call and
-     # choice.delta.function_call.arguments else ""
-     # for choice in chunk.choices]
-     # result.append(
-     # content[0] if len(content) > 0 else "")
+     result = []
+     for chunk in response:
+         if chunk.choices[0].delta.function_call is not None:
+             content = [
+                 choice.delta.function_call.arguments if choice.delta.function_call and
+                 choice.delta.function_call.arguments else ""
+                 for choice in chunk.choices]
+             result.append(
+                 content[0] if len(content) > 0 else "")

-     # print("".join(result))
+     print("".join(result))

      # Loading the response as a JSON object
-     json_response = json.loads(response.choices[0].message.function_call.arguments)
-     print(json_response)
+     # json_response = json.loads(response.choices[0].message.function_call.arguments)
+     # print(json_response)
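With stream=True, client.chat.completions.create() returns an iterator of chunks rather than a single completion object, so there is no response.choices[0].message to read, which is presumably why the json.loads(...) lines above were commented out. If the parsed arguments are still wanted, one option (a sketch, not part of the diff) is to parse the text accumulated from the streamed deltas:

    # hypothetical follow-up at the end of function_calling(), reusing the
    # `result` list built in the chunk loop above
    arguments_str = "".join(result)
    if arguments_str:
        json_response = json.loads(arguments_str)  # may still raise on a truncated stream
        print(json_response)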
examples/openai/tool_calling_nonstreaming.py
@@ -0,0 +1,92 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = OpenAI()
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = client.chat.completions.create(
+         model="gpt-3.5-turbo-0125",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto", # auto is default, but we'll be explicit
+     )
+     # print(response)
+     response_message = response.choices[0].message
+     tool_calls = response_message.tool_calls
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+         } # only one function in this example, but you can have multiple
+         messages.append(response_message) # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call.function.name
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call.function.arguments)
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             messages.append(
+                 {
+                     "tool_call_id": tool_call.id,
+                     "role": "tool",
+                     "name": function_name,
+                     "content": function_response,
+                 }
+             ) # extend conversation with function response
+         second_response = client.chat.completions.create(
+             model="gpt-3.5-turbo-0125",
+             messages=messages,
+         ) # get a new response from the model where it can see the function response
+         # print(second_response)
+         return second_response
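The comment inside run_conversation() warns that the model's JSON arguments may not always be valid, but the example does not show a guard. A minimal way to harden that step (a sketch, not part of the package) would be:

    # hypothetical hardening of the argument-parsing step in the example above
    try:
        function_args = json.loads(tool_call.function.arguments)
    except json.JSONDecodeError:
        function_args = {}  # skip this tool call instead of crashing
    function_response = function_to_call(
        location=function_args.get("location"),
        unit=function_args.get("unit"),
    )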
examples/openai/tool_calling_streaming.py
@@ -0,0 +1,167 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from langtrace_python_sdk import langtrace
+
+ # from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ langtrace.init(write_to_langtrace_cloud=False)
+
+ client = OpenAI()
+
+
+ # Example dummy function hard coded to return the same weather
+ # In production, this could be your backend API or an external API
+ def get_current_weather(location, unit="fahrenheit"):
+     """Get the current weather in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
+     else:
+         return json.dumps({"location": location, "temperature": "unknown"})
+
+
+ def get_current_time(location):
+     """Get the current time in a given location"""
+     if "tokyo" in location.lower():
+         return json.dumps({"location": "Tokyo", "time": "10"})
+     elif "san francisco" in location.lower():
+         return json.dumps({"location": "San Francisco", "time": "72"})
+     elif "paris" in location.lower():
+         return json.dumps({"location": "Paris", "time": "22"})
+     else:
+         return json.dumps({"location": location, "time": "unknown"})
+
+
+ def run_conversation():
+     # Step 1: send the conversation and available functions to the model
+     messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_weather",
+                 "description": "Get the current weather in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                     },
+                     "required": ["location"],
+                 },
+             },
+         },
+         {
+             "type": "function",
+             "function": {
+                 "name": "get_current_time",
+                 "description": "Get the current time in a given location",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "location": {
+                             "type": "string",
+                             "description": "The city and state, e.g. San Francisco, CA",
+                         },
+                     },
+                     "required": ["location"],
+                 },
+             },
+         }
+     ]
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=messages,
+         tools=tools,
+         tool_choice="auto", # auto is default, but we'll be explicit
+         stream=True,
+     )
+
+     # For streaming, uncomment the following lines
+     tool_call_dict = {}
+     tool_calls = []
+     id = ""
+     name = ""
+     arguments = ""
+     for chunk in response:
+         if chunk.choices[0].delta is not None and chunk.choices[0].delta.tool_calls is not None:
+             for choice in chunk.choices:
+                 for tool_call in choice.delta.tool_calls:
+                     if tool_call.id and id != tool_call.id:
+                         id = tool_call.id if tool_call.id else ""
+                         name = tool_call.function.name if tool_call.function and tool_call.function.name else ""
+                         tool_call_dict[name] = {
+                             "id": id,
+                             "function": {
+                                 "name": name,
+                                 "arguments": arguments
+                             },
+                             "type": "function"
+                         }
+                     arguments += tool_call.function.arguments if tool_call.function and tool_call.function.arguments else ""
+     if name != "":
+         tool_call_dict[name] = {
+             "id": id,
+             "function": {
+                 "name": name,
+                 "arguments": arguments
+             },
+             "type": "function"
+         }
+     for key, value in tool_call_dict.items():
+         tool_calls.append(value)
+
+     # Step 2: check if the model wanted to call a function
+     if tool_calls:
+         # Step 3: call the function
+         # Note: the JSON response may not always be valid; be sure to handle errors
+         available_functions = {
+             "get_current_weather": get_current_weather,
+             "get_current_time": get_current_time,
+         } # only one function in this example, but you can have multiple
+         # messages.append(response_message) # extend conversation with assistant's reply
+         # Step 4: send the info for each function call and function response to the model
+         for tool_call in tool_calls:
+             function_name = tool_call['function']['name']
+             function_to_call = available_functions[function_name]
+             function_args = json.loads(tool_call['function']['arguments'])
+             function_response = function_to_call(
+                 location=function_args.get("location"),
+                 unit=function_args.get("unit"),
+             )
+             func_res = json.loads(function_response)
+             content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
+             messages.append(
+                 {
+                     "role": "system",
+                     "content": content
+                 }
+             ) # extend conversation with function response
+         print(messages)
+         second_response = client.chat.completions.create(
+             model="gpt-4",
+             messages=messages,
+             stream=True,
+         ) # get a new response from the model where it can see the function response
+         result = []
+         for chunk in second_response:
+             if chunk.choices[0].delta.content is not None:
+                 content = [
+                     choice.delta.content if choice.delta and
+                     choice.delta.content else ""
+                     for choice in chunk.choices]
+                 result.append(
+                     content[0] if len(content) > 0 else "")
+         print("".join(result))
+         # return second_response