@langwatch/mcp-server 0.0.5 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/.env.example +2 -0
  2. package/.eslintrc.cjs +0 -1
  3. package/CHANGELOG.md +29 -0
  4. package/CONTRIBUTING.md +96 -0
  5. package/README.md +13 -6
  6. package/dist/index.js +7957 -1017
  7. package/dist/index.js.map +1 -1
  8. package/package.json +22 -9
  9. package/pnpm-workspace.yaml +2 -0
  10. package/pyproject.toml +17 -0
  11. package/src/index.ts +54 -11
  12. package/src/langwatch-api.ts +95 -85
  13. package/tests/evaluations.ipynb +649 -0
  14. package/tests/fixtures/azure/azure_openai_stream_bot_expected.py +102 -0
  15. package/tests/fixtures/azure/azure_openai_stream_bot_input.py +78 -0
  16. package/tests/fixtures/dspy/dspy_bot_expected.py +61 -0
  17. package/tests/fixtures/dspy/dspy_bot_input.py +53 -0
  18. package/tests/fixtures/fastapi/fastapi_app_expected.py +68 -0
  19. package/tests/fixtures/fastapi/fastapi_app_input.py +60 -0
  20. package/tests/fixtures/fastapi/prompt_management_fastapi_expected.py +114 -0
  21. package/tests/fixtures/fastapi/prompt_management_fastapi_input.py +88 -0
  22. package/tests/fixtures/haystack/haystack_bot_expected.py +141 -0
  23. package/tests/fixtures/haystack/haystack_bot_input.py +69 -0
  24. package/tests/fixtures/langchain/langchain_bot_expected.py +53 -0
  25. package/tests/fixtures/langchain/langchain_bot_input.py +45 -0
  26. package/tests/fixtures/langchain/langchain_bot_with_memory_expected.py +69 -0
  27. package/tests/fixtures/langchain/langchain_bot_with_memory_input.py +61 -0
  28. package/tests/fixtures/langchain/langchain_rag_bot_expected.py +97 -0
  29. package/tests/fixtures/langchain/langchain_rag_bot_input.py +77 -0
  30. package/tests/fixtures/langchain/langchain_rag_bot_vertex_ai_expected.py +116 -0
  31. package/tests/fixtures/langchain/langchain_rag_bot_vertex_ai_input.py +81 -0
  32. package/tests/fixtures/langchain/langgraph_rag_bot_with_threads_expected.py +331 -0
  33. package/tests/fixtures/langchain/langgraph_rag_bot_with_threads_input.py +106 -0
  34. package/tests/fixtures/litellm/litellm_bot_expected.py +40 -0
  35. package/tests/fixtures/litellm/litellm_bot_input.py +35 -0
  36. package/tests/fixtures/openai/openai_bot_expected.py +43 -0
  37. package/tests/fixtures/openai/openai_bot_function_call_expected.py +91 -0
  38. package/tests/fixtures/openai/openai_bot_function_call_input.py +82 -0
  39. package/tests/fixtures/openai/openai_bot_input.py +36 -0
  40. package/tests/fixtures/openai/openai_bot_rag_expected.py +73 -0
  41. package/tests/fixtures/openai/openai_bot_rag_input.py +51 -0
  42. package/tests/fixtures/opentelemetry/openinference_dspy_bot_expected.py +63 -0
  43. package/tests/fixtures/opentelemetry/openinference_dspy_bot_input.py +58 -0
  44. package/tests/fixtures/opentelemetry/openinference_langchain_bot_expected.py +53 -0
  45. package/tests/fixtures/opentelemetry/openinference_langchain_bot_input.py +52 -0
  46. package/tests/fixtures/opentelemetry/openinference_openai_bot_expected.py +49 -0
  47. package/tests/fixtures/opentelemetry/openinference_openai_bot_input.py +41 -0
  48. package/tests/fixtures/opentelemetry/openllmetry_openai_bot_expected.py +44 -0
  49. package/tests/fixtures/opentelemetry/openllmetry_openai_bot_input.py +40 -0
  50. package/tests/fixtures/strands/strands_bot_expected.py +84 -0
  51. package/tests/fixtures/strands/strands_bot_input.py +52 -0
  52. package/tests/scenario-openai.test.ts +158 -0
  53. package/tsconfig.json +0 -1
  54. package/uv.lock +2607 -0
  55. package/vitest.config.js +7 -0
package/tests/fixtures/azure/azure_openai_stream_bot_expected.py
@@ -0,0 +1,102 @@
+ import os
+ from typing import Optional
+ from dotenv import load_dotenv
+
+ from langwatch.types import RAGChunk
+
+ load_dotenv()
+
+ import chainlit as cl
+ from openai import AzureOpenAI
+
+ import langwatch
+
+ client = AzureOpenAI(
+     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+     api_version="2024-02-01",
+     azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),  # type: ignore
+ )
+ langwatch.api_key = os.getenv("LANGWATCH_API_KEY")
+
+
+ @langwatch.span(type="rag")
+ def retrieve(query: Optional[str] = None):
+     search_results = [
+         {
+             "id": "result_1",
+             "content": "This is the first result",
+         },
+         {
+             "id": "result_2",
+             "content": "This is the second result",
+         },
+     ]
+
+     langwatch.get_current_span().update(
+         contexts=[
+             RAGChunk(
+                 document_id=docs["id"],
+                 content=docs["content"],
+             )
+             for docs in search_results
+         ],
+     )
+
+     return search_results
+
+
+ @cl.on_message
+ @langwatch.trace()
+ async def main(message: cl.Message):
+     langwatch.get_current_trace().autotrack_openai_calls(client)
+
+     msg = cl.Message(
+         content="",
+     )
+
+     langwatch.get_current_trace().update(
+         trace_id=message.id,
+         metadata={"labels": ["azure"], "user_id": message.author},
+     )
+
+     completion = client.chat.completions.create(
+         model="gpt-35-turbo-0613",
+         messages=[
+             {
+                 "role": "system",
+                 "content": "come up with a query for searching the database based on user question, 3 words max",
+             },
+             {"role": "user", "content": message.content},
+         ],
+     )
+
+     query = completion.choices[0].message.content
+     search_results = retrieve(query=query)
+     results = "\n".join([f"{docs['id']}: {docs['content']}" for docs in search_results])
+
+     completion = client.chat.completions.create(
+         model="gpt-35-turbo-0613",
+         messages=[
+             {
+                 "role": "system",
+                 "content": f"""
+ You are a helpful assistant that only reply in short tweet-like responses, using lots of emojis.
+
+ We just made a search in the database for {query} and found {len(search_results)} results. Here they are, use that to help answering user:
+
+ {results}
+ """,
+             },
+             {"role": "user", "content": message.content},
+         ],
+         stream=True,
+     )
+
+     for part in completion:
+         if len(part.choices) == 0:
+             continue
+
+         if token := part.choices[0].delta.content or "":
+             await msg.stream_token(token)
+
+     await msg.update()
package/tests/fixtures/azure/azure_openai_stream_bot_input.py
@@ -0,0 +1,78 @@
+ import os
+ from typing import Optional
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ import chainlit as cl
+ from openai import AzureOpenAI
+
+ client = AzureOpenAI(
+     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+     api_version="2024-02-01",
+     azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),  # type: ignore
+ )
+
+
+ def retrieve(query: Optional[str] = None):
+     search_results = [
+         {
+             "id": "result_1",
+             "content": "This is the first result",
+         },
+         {
+             "id": "result_2",
+             "content": "This is the second result",
+         },
+     ]
+
+     return search_results
+
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     msg = cl.Message(
+         content="",
+     )
+
+     completion = client.chat.completions.create(
+         model="gpt-35-turbo-0613",
+         messages=[
+             {
+                 "role": "system",
+                 "content": "come up with a query for searching the database based on user question, 3 words max",
+             },
+             {"role": "user", "content": message.content},
+         ],
+     )
+
+     query = completion.choices[0].message.content
+     search_results = retrieve(query=query)
+     results = "\n".join([f"{docs['id']}: {docs['content']}" for docs in search_results])
+
+     completion = client.chat.completions.create(
+         model="gpt-35-turbo-0613",
+         messages=[
+             {
+                 "role": "system",
+                 "content": f"""
+ You are a helpful assistant that only reply in short tweet-like responses, using lots of emojis.
+
+ We just made a search in the database for {query} and found {len(search_results)} results. Here they are, use that to help answering user:
+
+ {results}
+ """,
+             },
+             {"role": "user", "content": message.content},
+         ],
+         stream=True,
+     )
+
+     for part in completion:
+         if len(part.choices) == 0:
+             continue
+
+         if token := part.choices[0].delta.content or "":
+             await msg.stream_token(token)
+
+     await msg.update()
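
Read side by side, the input/expected pair above captures everything the instrumentation adds: a langwatch import and API key, a @langwatch.trace() decorator with autotrack_openai_calls, and a @langwatch.span(type="rag") around retrieval that records RAGChunk contexts. A minimal standalone sketch of the same pattern, using only the calls that appear in the expected fixture (the Chainlit wiring is dropped; `answer` is a hypothetical entry point, not part of the package):

```python
import os

import langwatch
from langwatch.types import RAGChunk
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-02-01",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
)
langwatch.api_key = os.getenv("LANGWATCH_API_KEY")


@langwatch.span(type="rag")  # records the retrieval step as a RAG span on the current trace
def retrieve(query):
    docs = [{"id": "result_1", "content": "stub result"}]  # stand-in for a real search
    langwatch.get_current_span().update(
        contexts=[RAGChunk(document_id=d["id"], content=d["content"]) for d in docs]
    )
    return docs


@langwatch.trace()  # one trace per call; autotrack captures the OpenAI requests inside it
def answer(question: str):
    langwatch.get_current_trace().autotrack_openai_calls(client)
    retrieve(question)
    completion = client.chat.completions.create(
        model="gpt-35-turbo-0613",
        messages=[{"role": "user", "content": question}],
    )
    return completion.choices[0].message.content
```
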
package/tests/fixtures/dspy/dspy_bot_expected.py
@@ -0,0 +1,61 @@
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ import chainlit as cl
+
+ import langwatch
+
+ import dspy
+
+
+ lm = dspy.LM("openai/gpt-5", api_key=os.environ["OPENAI_API_KEY"], temperature=1)
+
+ colbertv2_wiki17_abstracts = dspy.ColBERTv2(
+     url="http://20.102.90.50:2017/wiki17_abstracts"
+ )
+
+ dspy.settings.configure(lm=lm, rm=colbertv2_wiki17_abstracts)
+
+
+ class GenerateAnswer(dspy.Signature):
+     """Answer questions with careful explanations to the user."""
+
+     context = dspy.InputField(desc="may contain relevant facts")
+     question = dspy.InputField()
+     answer = dspy.OutputField(desc="markdown formatted answer, use some emojis")
+
+
+ class RAG(dspy.Module):
+     def __init__(self, num_passages=3):
+         super().__init__()
+
+         self.retrieve = dspy.Retrieve(k=num_passages)
+         self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+
+     def forward(self, question):
+         context = self.retrieve(question).passages  # type: ignore
+         prediction = self.generate_answer(question=question, context=context)
+         return dspy.Prediction(answer=prediction.answer)
+
+
+ @cl.on_message
+ @langwatch.trace()
+ async def main(message: cl.Message):
+     langwatch.get_current_trace().autotrack_dspy()
+     langwatch.get_current_trace().update(
+         metadata={"labels": ["dspy", "thread"], "thread_id": "90210"},
+     )
+
+     msg = cl.Message(
+         content="",
+     )
+
+     program = RAG()
+     prediction = program(question=message.content)
+
+     await msg.stream_token(prediction.answer)
+     await msg.update()
+
+     return prediction.answer
package/tests/fixtures/dspy/dspy_bot_input.py
@@ -0,0 +1,53 @@
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ import chainlit as cl
+
+ import dspy
+
+
+ lm = dspy.LM("openai/gpt-5", api_key=os.environ["OPENAI_API_KEY"], temperature=1)
+
+ colbertv2_wiki17_abstracts = dspy.ColBERTv2(
+     url="http://20.102.90.50:2017/wiki17_abstracts"
+ )
+
+ dspy.settings.configure(lm=lm, rm=colbertv2_wiki17_abstracts)
+
+
+ class GenerateAnswer(dspy.Signature):
+     """Answer questions with careful explanations to the user."""
+
+     context = dspy.InputField(desc="may contain relevant facts")
+     question = dspy.InputField()
+     answer = dspy.OutputField(desc="markdown formatted answer, use some emojis")
+
+
+ class RAG(dspy.Module):
+     def __init__(self, num_passages=3):
+         super().__init__()
+
+         self.retrieve = dspy.Retrieve(k=num_passages)
+         self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+
+     def forward(self, question):
+         context = self.retrieve(question).passages  # type: ignore
+         prediction = self.generate_answer(question=question, context=context)
+         return dspy.Prediction(answer=prediction.answer)
+
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     msg = cl.Message(
+         content="",
+     )
+
+     program = RAG()
+     prediction = program(question=message.content)
+
+     await msg.stream_token(prediction.answer)
+     await msg.update()
+
+     return prediction.answer
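
For DSPy the expected diff is smaller: nothing is tracked by hand, the fixture only adds @langwatch.trace() plus autotrack_dspy() and some trace metadata. A minimal sketch of just the instrumented handler, reusing the RAG module defined in the fixture above (the Chainlit handler is replaced by a plain, hypothetical `run` function):

```python
import langwatch


@langwatch.trace()
def run(question: str) -> str:
    # autotrack_dspy() hooks the DSPy module calls made while this trace is open
    langwatch.get_current_trace().autotrack_dspy()
    langwatch.get_current_trace().update(
        metadata={"labels": ["dspy", "thread"], "thread_id": "90210"},
    )
    return RAG()(question=question).answer  # RAG as defined in the fixture above
```
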
package/tests/fixtures/fastapi/fastapi_app_expected.py
@@ -0,0 +1,68 @@
+ from dotenv import load_dotenv
+ from fastapi.responses import StreamingResponse
+ from fastapi.testclient import TestClient
+
+ load_dotenv()
+
+ from fastapi import FastAPI
+ from openai import OpenAI
+ from pydantic import BaseModel
+
+ client = OpenAI()
+
+ import langwatch
+
+ app = FastAPI()
+
+
+ class EndpointParams(BaseModel):
+     input: str
+
+
+ class CompletionStreaming:
+     @langwatch.trace(name="fastapi_sample_endpoint")
+     async def execute(self, input: str):
+         langwatch.get_current_trace().autotrack_openai_calls(client)
+         langwatch.get_current_trace().update(
+             metadata={"label": "fastapi"},
+         )
+
+         completion = client.chat.completions.create(
+             model="gpt-4o",
+             messages=[
+                 {
+                     "role": "system",
+                     "content": "You are a helpful assistant that only reply in short tweet-like responses, using lots of emojis.",
+                 },
+                 {"role": "user", "content": input},
+             ],
+             stream=True,
+         )
+
+         for chunk in completion:
+             content = chunk.choices[0].delta.content
+             if content is not None:
+                 yield content
+
+
+ @app.post("/")
+ async def fastapi_sample_endpoint(params: EndpointParams):
+     return StreamingResponse(CompletionStreaming().execute(params.input))  # type: ignore
+
+
+ def call_fastapi_sample_endpoint(input: str) -> str:
+     test_client = TestClient(app)
+     response = test_client.post("/", json={"input": input})
+
+     return response.text
+
+
+ if __name__ == "__main__":
+     import uvicorn
+     import os
+
+     # Test one llm call before starting the server
+     print(call_fastapi_sample_endpoint("Hello, world!"))
+
+     port = int(os.environ.get("PORT", 9000))
+     uvicorn.run(app, host="0.0.0.0", port=port)
package/tests/fixtures/fastapi/fastapi_app_input.py
@@ -0,0 +1,60 @@
+ from dotenv import load_dotenv
+ from fastapi.responses import StreamingResponse
+ from fastapi.testclient import TestClient
+
+ load_dotenv()
+
+ from fastapi import FastAPI
+ from openai import OpenAI
+ from pydantic import BaseModel
+
+ client = OpenAI()
+
+ app = FastAPI()
+
+
+ class EndpointParams(BaseModel):
+     input: str
+
+
+ class CompletionStreaming:
+     async def execute(self, input: str):
+         completion = client.chat.completions.create(
+             model="gpt-4o",
+             messages=[
+                 {
+                     "role": "system",
+                     "content": "You are a helpful assistant that only reply in short tweet-like responses, using lots of emojis.",
+                 },
+                 {"role": "user", "content": input},
+             ],
+             stream=True,
+         )
+
+         for chunk in completion:
+             content = chunk.choices[0].delta.content
+             if content is not None:
+                 yield content
+
+
+ @app.post("/")
+ async def fastapi_sample_endpoint(params: EndpointParams):
+     return StreamingResponse(CompletionStreaming().execute(params.input))  # type: ignore
+
+
+ def call_fastapi_sample_endpoint(input: str) -> str:
+     test_client = TestClient(app)
+     response = test_client.post("/", json={"input": input})
+
+     return response.text
+
+
+ if __name__ == "__main__":
+     import uvicorn
+     import os
+
+     # Test one llm call before starting the server
+     print(call_fastapi_sample_endpoint("Hello, world!"))
+
+     port = int(os.environ.get("PORT", 9000))
+     uvicorn.run(app, host="0.0.0.0", port=port)
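
The FastAPI pair shows the same autotrack pattern applied to a streaming endpoint: @langwatch.trace(name=...) sits directly on the async generator, so the trace stays open for the whole streamed response. A condensed sketch of just that method, with the fixture's system prompt trimmed (everything else is verbatim from the expected fixture):

```python
import langwatch
from openai import OpenAI

client = OpenAI()


class CompletionStreaming:
    @langwatch.trace(name="fastapi_sample_endpoint")  # the trace spans the full stream
    async def execute(self, input: str):
        langwatch.get_current_trace().autotrack_openai_calls(client)
        langwatch.get_current_trace().update(metadata={"label": "fastapi"})

        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": input}],
            stream=True,
        )
        for chunk in completion:
            content = chunk.choices[0].delta.content
            if content is not None:
                yield content  # each token is yielded inside the open trace
```
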
package/tests/fixtures/fastapi/prompt_management_fastapi_expected.py
@@ -0,0 +1,114 @@
+ """
+ Example demonstrating LangWatch prompt management operations.
+
+ This example shows how to:
+ 1. Create a new prompt
+ 2. Retrieve and use a prompt
+ 3. Update a prompt
+ 4. Use the updated prompt
+ 5. Delete a prompt
+
+ Run this example with:
+ python examples/prompt_management.py
+ """
+
+ from dotenv import load_dotenv
+ import langwatch
+ import uuid
+ from openai import OpenAI
+
+ load_dotenv()
+
+ client = OpenAI()
+
+ # Initialize LangWatch (ensure you have LANGWATCH_API_KEY set)
+ langwatch.setup(debug=True)
+
+
+ @langwatch.span()
+ def example():
+     # Autotrack OpenAI calls
+     langwatch.get_current_trace().autotrack_openai_calls(client)
+
+     print("=== LangWatch Prompt Management Example ===\n")
+
+     # 1. Create a new prompt
+     print("1. Creating a new prompt...")
+     short_uuid = str(uuid.uuid4())[:8]
+     prompt = langwatch.prompts.create(
+         handle=f"something/example_prompt_{short_uuid}",
+         scope="PROJECT",  # optional - 'ORGANIZATION' or 'PROJECT'
+         author_id=None,  # optional
+         prompt="You are a helpful assistant. Specialize in {{subject}}.",  # optional
+         messages=[  # optional -- you cannot set a system message and a prompt at the same time
+             {"role": "user", "content": "{{question}}"},
+         ],
+         inputs=[{"identifier": "question", "type": "str"}],  # optional
+         outputs=[
+             {"identifier": "answer", "type": "str", "json_schema": {"type": "str"}}
+         ],  # optional
+     )
+     print(f"Created prompt with id: {prompt.id}")
+     print(f"Created prompt with handle: {prompt.handle}")
+
+     # 2. Get and use the prompt
+     print("2. Retrieving the prompt...")
+     retrieved_prompt_specific_version = langwatch.prompts.get(
+         prompt.handle, version_number=prompt.version_number
+     )
+     print(f"Retrieved prompt: {retrieved_prompt_specific_version.version_number}")
+
+     # Use the prompt (example usage)
+     print("Using the created prompt...")
+
+     # Compile the prompt with variables
+     compiled_prompt = retrieved_prompt_specific_version.compile(
+         question="What is the capital of France?"
+     )
+     print(f"Compiled prompt: {compiled_prompt.prompt}")
+     print(f"Compiled prompt messages: {compiled_prompt.messages}")
+
+     # 3. Update the prompt
+     print("3. Updating the prompt...")
+     updated_prompt = langwatch.prompts.update(
+         prompt.handle,
+         handle=f"updated_example_prompt_{short_uuid}",  # optional
+         scope="PROJECT",  # optional - 'ORGANIZATION' or 'PROJECT'
+         prompt="You are obsessed with {{subject}} and talk in CAPS.",  # optional
+     )
+     print(f"Updated prompt name: {updated_prompt.name}")
+     print(f"Prompt ID remains: {updated_prompt.id}")
+
+     # 4. Use the updated prompt
+     print("Using the updated prompt...")
+
+     # Compile the updated prompt to show the difference
+     updated_compiled = updated_prompt.compile_strict(
+         subject="quantum computing", question="How does it work in 10 words or less?"
+     )
+     print(f"Updated compiled prompt: {updated_compiled.prompt}")
+     print(f"Updated compiled prompt messages: {updated_compiled.messages}")
+
+     # This is where you would use the prompt in your application
+     # For example, you could use the prompt to generate a response
+     response = client.chat.completions.create(
+         model=updated_compiled.model.split("openai/")[1],
+         messages=updated_compiled.messages,
+     )
+
+     print(f"Response: {response.choices[0].message.content}")
+
+     # 5. Delete the prompt
+     print("5. Deleting the prompt...")
+     result = langwatch.prompts.delete(updated_prompt.handle)
+     print(f"Deletion result: {result}")
+     print("Prompt management example completed successfully!")
+
+
+ @langwatch.trace()
+ def main():
+     example()
+
+
+ if __name__ == "__main__":
+     main()
package/tests/fixtures/fastapi/prompt_management_fastapi_input.py
@@ -0,0 +1,88 @@
+ """
+ Example demonstrating prompt management operations.
+
+ This example shows how to:
+ 1. Create a new prompt
+ 2. Retrieve and use a prompt
+ 3. Update a prompt
+ 4. Use the updated prompt
+ 5. Delete a prompt
+
+ Run this example with:
+ python examples/prompt_management.py
+ """
+
+ from dotenv import load_dotenv
+ import uuid
+ from openai import OpenAI
+
+ load_dotenv()
+
+ client = OpenAI()
+
+
+ def example():
+     print("=== Prompt Management Example ===\n")
+
+     # 1. Create a new prompt
+     print("1. Creating a new prompt...")
+     short_uuid = str(uuid.uuid4())[:8]
+
+     # Manual prompt management would go here
+     prompt_template = "You are a helpful assistant. Specialize in {{subject}}."
+     messages_template = [
+         {"role": "user", "content": "{{question}}"},
+     ]
+
+     print(f"Created prompt template: {prompt_template}")
+
+     # 2. Get and use the prompt
+     print("2. Using the prompt...")
+
+     # Compile the prompt with variables (manual substitution)
+     compiled_prompt = prompt_template.replace("{{subject}}", "quantum computing")
+     compiled_messages = [
+         {"role": "user", "content": "What is the capital of France?"}
+     ]
+     print(f"Compiled prompt: {compiled_prompt}")
+     print(f"Compiled prompt messages: {compiled_messages}")
+
+     # 3. Update the prompt
+     print("3. Updating the prompt...")
+     updated_prompt_template = "You are obsessed with {{subject}} and talk in CAPS."
+     print(f"Updated prompt template: {updated_prompt_template}")
+
+     # 4. Use the updated prompt
+     print("Using the updated prompt...")
+
+     # Compile the updated prompt to show the difference
+     updated_compiled = updated_prompt_template.replace("{{subject}}", "quantum computing")
+     updated_messages = [
+         {"role": "user", "content": "How does it work in 10 words or less?"}
+     ]
+     print(f"Updated compiled prompt: {updated_compiled}")
+     print(f"Updated compiled prompt messages: {updated_messages}")
+
+     # This is where you would use the prompt in your application
+     # For example, you could use the prompt to generate a response
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=[
+             {"role": "system", "content": updated_compiled},
+             *updated_messages
+         ],
+     )
+
+     print(f"Response: {response.choices[0].message.content}")
+
+     # 5. Delete the prompt
+     print("5. Deleting the prompt...")
+     print("Prompt management example completed successfully!")
+
+
+ def main():
+     example()
+
+
+ if __name__ == "__main__":
+     main()
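
The prompt-management pair is less about tracing and more about replacing the input file's hand-rolled template strings with the langwatch.prompts CRUD API. Boiled down to the calls the expected fixture makes (the handle and variable values here are illustrative, not the fixture's generated ones):

```python
import langwatch

langwatch.setup()  # expects LANGWATCH_API_KEY in the environment

# Create a versioned prompt with {{variable}} placeholders
prompt = langwatch.prompts.create(
    handle="demo/example_prompt",
    prompt="You are a helpful assistant. Specialize in {{subject}}.",
    messages=[{"role": "user", "content": "{{question}}"}],
)

# Fetch a specific version and substitute variables
fetched = langwatch.prompts.get(prompt.handle, version_number=prompt.version_number)
compiled = fetched.compile(question="What is the capital of France?")

# Update in place (same id, new content); compile_strict is the strict variant,
# which the fixture calls with every declared variable supplied
updated = langwatch.prompts.update(
    prompt.handle,
    prompt="You are obsessed with {{subject}} and talk in CAPS.",
)
strict = updated.compile_strict(subject="quantum computing", question="How does it work?")

# Clean up
langwatch.prompts.delete(updated.handle)
```
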