langtrace-python-sdk 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (59)
  1. examples/anthropic_example/completion.py +1 -1
  2. examples/chroma_example/basic.py +1 -1
  3. examples/cohere_example/chat.py +7 -3
  4. examples/cohere_example/chat_stream.py +7 -2
  5. examples/cohere_example/embed.py +2 -1
  6. examples/cohere_example/rerank.py +2 -1
  7. examples/cohere_example/tools.py +21 -5
  8. examples/fastapi_example/basic_route.py +1 -1
  9. examples/hiveagent_example/basic.py +1 -1
  10. examples/langchain_example/groq_example.py +3 -1
  11. examples/langchain_example/langgraph_example.py +11 -12
  12. examples/llamaindex_example/agent.py +1 -1
  13. examples/llamaindex_example/basic.py +1 -1
  14. examples/openai_example/async_tool_calling_nonstreaming.py +11 -4
  15. examples/openai_example/async_tool_calling_streaming.py +41 -29
  16. examples/openai_example/chat_completion.py +12 -8
  17. examples/openai_example/embeddings_create.py +2 -1
  18. examples/openai_example/function_calling.py +11 -6
  19. examples/openai_example/images_generate.py +2 -1
  20. examples/openai_example/tool_calling.py +1 -1
  21. examples/openai_example/tool_calling_nonstreaming.py +11 -3
  22. examples/openai_example/tool_calling_streaming.py +42 -29
  23. examples/perplexity_example/basic.py +1 -1
  24. examples/pinecone_example/basic.py +4 -1
  25. examples/qdrant_example/basic.py +8 -6
  26. langtrace_python_sdk/constants/instrumentation/groq.py +0 -2
  27. langtrace_python_sdk/extensions/langtrace_exporter.py +4 -12
  28. langtrace_python_sdk/instrumentation/anthropic/instrumentation.py +1 -2
  29. langtrace_python_sdk/instrumentation/anthropic/patch.py +14 -4
  30. langtrace_python_sdk/instrumentation/chroma/patch.py +4 -2
  31. langtrace_python_sdk/instrumentation/cohere/instrumentation.py +6 -3
  32. langtrace_python_sdk/instrumentation/groq/instrumentation.py +3 -1
  33. langtrace_python_sdk/instrumentation/groq/patch.py +26 -11
  34. langtrace_python_sdk/instrumentation/langchain/patch.py +4 -2
  35. langtrace_python_sdk/instrumentation/langchain_community/instrumentation.py +1 -2
  36. langtrace_python_sdk/instrumentation/langchain_community/patch.py +4 -3
  37. langtrace_python_sdk/instrumentation/langchain_core/instrumentation.py +3 -1
  38. langtrace_python_sdk/instrumentation/langchain_core/patch.py +4 -2
  39. langtrace_python_sdk/instrumentation/langgraph/instrumentation.py +17 -8
  40. langtrace_python_sdk/instrumentation/langgraph/patch.py +47 -26
  41. langtrace_python_sdk/instrumentation/llamaindex/patch.py +3 -1
  42. langtrace_python_sdk/instrumentation/openai/instrumentation.py +7 -3
  43. langtrace_python_sdk/instrumentation/openai/patch.py +40 -19
  44. langtrace_python_sdk/instrumentation/pinecone/patch.py +4 -2
  45. langtrace_python_sdk/instrumentation/qdrant/patch.py +4 -2
  46. langtrace_python_sdk/langtrace.py +128 -64
  47. langtrace_python_sdk/types/__init__.py +29 -0
  48. langtrace_python_sdk/utils/llm.py +2 -4
  49. langtrace_python_sdk/utils/with_root_span.py +3 -3
  50. langtrace_python_sdk/version.py +1 -1
  51. {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/METADATA +2 -2
  52. {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/RECORD +59 -58
  53. tests/chroma/test_chroma.py +26 -20
  54. tests/langchain/test_langchain.py +29 -16
  55. tests/langchain/test_langchain_community.py +28 -15
  56. tests/langchain/test_langchain_core.py +52 -26
  57. tests/pinecone/test_pinecone.py +27 -18
  58. {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/WHEEL +0 -0
  59. {langtrace_python_sdk-2.0.3.dist-info → langtrace_python_sdk-2.0.5.dist-info}/licenses/LICENSE +0 -0
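The one behavioral change repeated across nearly every example below is a rename of the console-export flag passed to langtrace.init: the 2.0.3 flag write_to_langtrace_cloud=False becomes write_spans_to_console=True in 2.0.5. A minimal sketch of the migration (assuming, as these examples do, that spans should be printed to the console rather than sent to the Langtrace cloud):

    from langtrace_python_sdk import langtrace

    # 2.0.3 spelling: opt out of cloud export
    # langtrace.init(write_to_langtrace_cloud=False)

    # 2.0.5 spelling: opt in to console export
    langtrace.init(write_spans_to_console=True)

The rest of the example churn appears to be formatter-style rewrapping (broken-out call arguments, trailing commas, double-quoted strings) with no change in behavior.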
examples/anthropic_example/completion.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk import langtrace, with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 
 @with_langtrace_root_span("messages_create")
examples/chroma_example/basic.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 
 @with_langtrace_root_span()
examples/cohere_example/chat.py
@@ -7,7 +7,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 co = cohere.Client()
 
@@ -17,11 +18,14 @@ def chat_comp():
     response = co.chat(
         chat_history=[
             {"role": "USER", "message": "Who discovered gravity?"},
-            {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}
+            {
+                "role": "CHATBOT",
+                "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton",
+            },
         ],
         message="Tell me a story in 3 sentences or less?",
         preamble="answer like a pirate",
         # perform web search before answering the question. You can also use your own custom connector.
-        connectors=[{"id": "web-search"}]
+        connectors=[{"id": "web-search"}],
     )
     print(response)
examples/cohere_example/chat_stream.py
@@ -5,7 +5,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 co = cohere.Client()
 
@@ -13,7 +14,11 @@ co = cohere.Client()
 # @with_langtrace_root_span("chat_stream")
 def chat_stream():
     result = []
-    for event in co.chat_stream(message="Tell me a short story in 2 lines", preamble="Respond like a pirate", max_tokens=100):
+    for event in co.chat_stream(
+        message="Tell me a short story in 2 lines",
+        preamble="Respond like a pirate",
+        max_tokens=100,
+    ):
         if event.event_type == "text-generation":
             result.append(event.text)
         elif event.event_type == "stream-end":
examples/cohere_example/embed.py
@@ -7,7 +7,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 co = cohere.Client()
 
examples/cohere_example/rerank.py
@@ -7,7 +7,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 co = cohere.Client()
 
examples/cohere_example/tools.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 co = cohere.Client()
 
@@ -17,10 +17,26 @@ student_custom_functions = [
         "name": "extract_student_info",
         "description": "Get the student information from the body of the input text",
         "parameter_definitions": {
-            "name": {"type": "string", "description": "Name of the person", "required": True},
-            "major": {"type": "string", "description": "Major subject.", "required": True},
-            "school": {"type": "string", "description": "The university name.", "required": True},
-            "grades": {"type": "integer", "description": "GPA of the student.", "required": True},
+            "name": {
+                "type": "string",
+                "description": "Name of the person",
+                "required": True,
+            },
+            "major": {
+                "type": "string",
+                "description": "Major subject.",
+                "required": True,
+            },
+            "school": {
+                "type": "string",
+                "description": "The university name.",
+                "required": True,
+            },
+            "grades": {
+                "type": "integer",
+                "description": "GPA of the student.",
+                "required": True,
+            },
             "club": {
                 "type": "string",
                 "description": "School club for extracurricular activities. ",
examples/fastapi_example/basic_route.py
@@ -8,7 +8,7 @@ from openai import OpenAI
 
 from langtrace_python_sdk import langtrace
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 app = FastAPI()
 client = OpenAI()
 
examples/hiveagent_example/basic.py
@@ -6,7 +6,7 @@ from openai import OpenAI
 load_dotenv()
 
 langtrace.init(
-    write_to_langtrace_cloud=False,
+    write_spans_to_console=True,
     api_host="http://localhost:3000",
 )
 
examples/langchain_example/groq_example.py
@@ -22,7 +22,9 @@ def groq_example():
     prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
 
     chain = prompt | chat
-    result = chain.invoke({"text": "Explain the importance of low latency LLMs in 2 sentences or less."})
+    result = chain.invoke(
+        {"text": "Explain the importance of low latency LLMs in 2 sentences or less."}
+    )
     # print(result)
     return result
 
examples/langchain_example/langgraph_example.py
@@ -48,17 +48,12 @@ def invoke_tool(state):
     if multiply_call is None:
         raise Exception("No adder input found.")
 
-    res = multiply.invoke(
-        json.loads(multiply_call.get("function").get("arguments"))
-    )
+    res = multiply.invoke(json.loads(multiply_call.get("function").get("arguments")))
 
-    return ToolMessage(
-        tool_call_id=multiply_call.get("id"),
-        content=res
-    )
+    return ToolMessage(tool_call_id=multiply_call.get("id"), content=res)
 
 
-@with_langtrace_root_span('langgraph_example')
+@with_langtrace_root_span("langgraph_example")
 def basic():
 
     graph = MessageGraph()
@@ -67,10 +62,14 @@ def basic():
 
     graph.add_node("multiply", invoke_tool)
 
-    graph.add_conditional_edges("oracle", router, {
-        "multiply": "multiply",
-        "end": END,
-    })
+    graph.add_conditional_edges(
+        "oracle",
+        router,
+        {
+            "multiply": "multiply",
+            "end": END,
+        },
+    )
 
     graph.add_edge("multiply", END)
 
examples/llamaindex_example/agent.py
@@ -12,7 +12,7 @@ import nest_asyncio
 
 nest_asyncio.apply()
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 
 def multiply(a: int, b: int) -> int:
examples/llamaindex_example/basic.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 _ = load_dotenv(find_dotenv())
 
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 
 @with_langtrace_root_span()
examples/openai_example/async_tool_calling_nonstreaming.py
@@ -9,10 +9,11 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 client = AsyncOpenAI()
 
+
 # Example dummy function hard coded to return the same weather
 # In production, this could be your backend API or an external API
 def get_current_weather(location, unit="fahrenheit"):
@@ -20,7 +21,9 @@ def get_current_weather(location, unit="fahrenheit"):
     if "tokyo" in location.lower():
         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
     elif "san francisco" in location.lower():
-        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
     elif "paris" in location.lower():
         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
     else:
@@ -29,7 +32,12 @@ def get_current_weather(location, unit="fahrenheit"):
 
 async def run_conversation():
     # Step 1: send the conversation and available functions to the model
-    messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
     tools = [
         {
             "type": "function",
@@ -90,4 +98,3 @@
     ) # get a new response from the model where it can see the function response
     # print(second_response)
     return second_response
-
examples/openai_example/async_tool_calling_streaming.py
@@ -9,7 +9,7 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 client = AsyncOpenAI()
 
@@ -21,7 +21,9 @@ def get_current_weather(location, unit="fahrenheit"):
     if "tokyo" in location.lower():
         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
     elif "san francisco" in location.lower():
-        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
     elif "paris" in location.lower():
         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
     else:
@@ -42,7 +44,12 @@ def get_current_time(location):
 
 async def run_conversation():
     # Step 1: send the conversation and available functions to the model
-    messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
     tools = [
         {
             "type": "function",
@@ -78,7 +85,7 @@ async def run_conversation():
                     "required": ["location"],
                 },
             },
-        }
+        },
     ]
     response = await client.chat.completions.create(
         model="gpt-4",
@@ -95,29 +102,34 @@ async def run_conversation():
     name = ""
     arguments = ""
    async for chunk in response:
-        if chunk.choices[0].delta is not None and chunk.choices[0].delta.tool_calls is not None:
+        if (
+            chunk.choices[0].delta is not None
+            and chunk.choices[0].delta.tool_calls is not None
+        ):
             for choice in chunk.choices:
                 for tool_call in choice.delta.tool_calls:
                     if tool_call.id and id != tool_call.id:
                         id = tool_call.id if tool_call.id else ""
-                        name = tool_call.function.name if tool_call.function and tool_call.function.name else ""
+                        name = (
+                            tool_call.function.name
+                            if tool_call.function and tool_call.function.name
+                            else ""
+                        )
                         tool_call_dict[name] = {
                             "id": id,
-                            "function": {
-                                "name": name,
-                                "arguments": arguments
-                            },
-                            "type": "function"
+                            "function": {"name": name, "arguments": arguments},
+                            "type": "function",
                         }
-                    arguments += tool_call.function.arguments if tool_call.function and tool_call.function.arguments else ""
+                    arguments += (
+                        tool_call.function.arguments
+                        if tool_call.function and tool_call.function.arguments
+                        else ""
+                    )
     if name != "":
         tool_call_dict[name] = {
             "id": id,
-            "function": {
-                "name": name,
-                "arguments": arguments
-            },
-            "type": "function"
+            "function": {"name": name, "arguments": arguments},
+            "type": "function",
         }
     for key, value in tool_call_dict.items():
         tool_calls.append(value)
@@ -133,9 +145,9 @@
     # messages.append(response_message) # extend conversation with assistant's reply
     # Step 4: send the info for each function call and function response to the model
     for tool_call in tool_calls:
-        function_name = tool_call['function']['name']
+        function_name = tool_call["function"]["name"]
         function_to_call = available_functions[function_name]
-        function_args = json.loads(tool_call['function']['arguments'])
+        function_args = json.loads(tool_call["function"]["arguments"])
         function_response = function_to_call(
             location=function_args.get("location"),
             unit=function_args.get("unit"),
@@ -143,10 +155,7 @@
         func_res = json.loads(function_response)
         content = f"Use the below information to answer the user's question: The current weather in {func_res['location']} is {func_res['temperature']} degrees {func_res['unit']}"
         messages.append(
-            {
-                "role": "system",
-                "content": content
-            }
+            {"role": "system", "content": content}
         ) # extend conversation with function response
     print(messages)
     second_response = await client.chat.completions.create(
@@ -158,10 +167,13 @@
     async for chunk in second_response:
         if chunk.choices[0].delta.content is not None:
             content = [
-                choice.delta.content if choice.delta and
-                choice.delta.content else ""
-                for choice in chunk.choices]
-            result.append(
-                content[0] if len(content) > 0 else "")
+                (
+                    choice.delta.content
+                    if choice.delta and choice.delta.content
+                    else ""
+                )
+                for choice in chunk.choices
+            ]
+            result.append(content[0] if len(content) > 0 else "")
     print("".join(result))
-    # return second_response
+    # return second_response
examples/openai_example/chat_completion.py
@@ -3,11 +3,13 @@ from openai import OpenAI
 
 from langtrace_python_sdk import langtrace
 from langtrace_python_sdk.utils.with_root_span import (
-    with_additional_attributes, with_langtrace_root_span)
+    with_additional_attributes,
+    with_langtrace_root_span,
+)
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 client = OpenAI()
 
 
@@ -15,7 +17,10 @@ client = OpenAI()
 def api():
     response = client.chat.completions.create(
         model="gpt-4",
-        messages=[{"role": "system", "content": "Talk like a pirate"}, {"role": "user", "content": "Tell me a story in 3 sentences or less."}],
+        messages=[
+            {"role": "system", "content": "Talk like a pirate"},
+            {"role": "user", "content": "Tell me a story in 3 sentences or less."},
+        ],
         stream=True,
         # stream=False,
     )
@@ -31,11 +36,10 @@ def chat_completion():
     for chunk in response:
         if chunk.choices[0].delta.content is not None:
             content = [
-                choice.delta.content if choice.delta and
-                choice.delta.content else ""
-                for choice in chunk.choices]
-            result.append(
-                content[0] if len(content) > 0 else "")
+                choice.delta.content if choice.delta and choice.delta.content else ""
+                for choice in chunk.choices
+            ]
+            result.append(content[0] if len(content) > 0 else "")
 
     print("".join(result))
     return response
examples/openai_example/embeddings_create.py
@@ -6,7 +6,8 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 client = OpenAI()
 
 
examples/openai_example/function_calling.py
@@ -7,7 +7,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 client = OpenAI()
 
@@ -51,11 +52,15 @@ def function_calling():
     for chunk in response:
         if chunk.choices[0].delta.function_call is not None:
             content = [
-                choice.delta.function_call.arguments if choice.delta.function_call and
-                choice.delta.function_call.arguments else ""
-                for choice in chunk.choices]
-            result.append(
-                content[0] if len(content) > 0 else "")
+                (
+                    choice.delta.function_call.arguments
+                    if choice.delta.function_call
+                    and choice.delta.function_call.arguments
+                    else ""
+                )
+                for choice in chunk.choices
+            ]
+            result.append(content[0] if len(content) > 0 else "")
 
     print("".join(result))
 
examples/openai_example/images_generate.py
@@ -6,7 +6,8 @@ from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 client = OpenAI()
 
examples/openai_example/tool_calling.py
@@ -7,7 +7,7 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
 
 client = OpenAI()
 
examples/openai_example/tool_calling_nonstreaming.py
@@ -9,7 +9,8 @@ from langtrace_python_sdk import langtrace
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_to_langtrace_cloud=False)
+langtrace.init(write_spans_to_console=True)
+
 
 client = OpenAI()
 
@@ -21,7 +22,9 @@ def get_current_weather(location, unit="fahrenheit"):
     if "tokyo" in location.lower():
         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
     elif "san francisco" in location.lower():
-        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": unit}
+        )
     elif "paris" in location.lower():
         return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
     else:
@@ -30,7 +33,12 @@ def get_current_weather(location, unit="fahrenheit"):
 
 def run_conversation():
     # Step 1: send the conversation and available functions to the model
-    messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+    messages = [
+        {
+            "role": "user",
+            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+        }
+    ]
     tools = [
         {
             "type": "function",