create-message-kit 1.0.21 → 1.1.5-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -15,3 +15,7 @@ npx create-message-kit@latest
  ```bash
  yarn create message-kit
  ```
+
+ ```bash [npm]
+ npm init message-kit@latest
+ ```
package/index.js CHANGED
@@ -14,15 +14,11 @@ const packageJson = JSON.parse(
  fs.readFileSync(resolve(__dirname, "package.json"), "utf8"),
  );
  const version = packageJson.version;
- const pckMessageKitLib = JSON.parse(
- fs.readFileSync(resolve(__dirname, "../message-kit/package.json"), "utf8"),
- );
- const versionMessageKitLib = pckMessageKitLib.version;
  program
  .name("byob")
  .description("CLI to initialize projects")
  .action(async () => {
- log.info(pc.cyan(`Welcome to MessageKit v${versionMessageKitLib}!`));
+ log.info(pc.cyan(`Welcome to MessageKit CLI v${version}!`));
  const coolLogo = `
  ███╗ ███╗███████╗███████╗███████╗ █████╗ ██████╗ ███████╗██╗ ██╗██╗████████╗
  ████╗ ████║██╔════╝██╔════╝██╔════╝██╔══██╗██╔════╝ ██╔════╝██║ ██╔╝██║╚══██╔══╝
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "create-message-kit",
- "version": "1.0.21",
+ "version": "1.1.5-beta.2",
  "license": "MIT",
  "type": "module",
  "main": "index.js",
@@ -1,23 +1,23 @@
- import { HandlerContext } from "@xmtp/message-kit";
+ import { HandlerContext, SkillResponse } from "@xmtp/message-kit";
  import { getUserInfo, clearInfoCache, isOnXMTP } from "../lib/resolver.js";
- import { textGeneration } from "../lib/openai.js";
- import { processResponseWithSkill } from "../lib/openai.js";
  import { isAddress } from "viem";
- import { ens_agent_prompt } from "../prompt.js";
- import { clearChatHistories } from "../lib/openai.js";
+ import { clearMemory } from "../lib/gpt.js";

  export const frameUrl = "https://ens.steer.fun/";
  export const ensUrl = "https://app.ens.domains/";
  export const baseTxUrl = "https://base-tx-frame.vercel.app";

- export async function handleEns(context: HandlerContext) {
+ export async function handleEns(
+ context: HandlerContext,
+ ): Promise<SkillResponse> {
  const {
  message: {
  content: { command, params, sender },
  },
+ skill,
  } = context;
  if (command == "reset") {
- clear();
+ clearMemory();
  return { code: 200, message: "Conversation reset." };
  } else if (command == "renew") {
  // Destructure and validate parameters for the ens command
@@ -32,7 +32,7 @@ export async function handleEns(context: HandlerContext) {

  const data = await getUserInfo(domain);

- if (!data || data?.address !== sender?.address) {
+ if (!data?.address || data?.address !== sender?.address) {
  return {
  code: 403,
  message:
@@ -60,7 +60,7 @@ export async function handleEns(context: HandlerContext) {
  const { domain } = params;

  const data = await getUserInfo(domain);
- if (!data) {
+ if (!data?.ensDomain) {
  return {
  code: 404,
  message: "Domain not found.",
@@ -118,7 +118,7 @@ export async function handleEns(context: HandlerContext) {
  };
  } else {
  let message = `Looks like ${domain} is already registered!`;
- await context.skill("/cool " + domain);
+ await skill("/cool " + domain);
  return {
  code: 404,
  message,
@@ -148,39 +148,8 @@ export async function handleEns(context: HandlerContext) {
  code: 200,
  message: `${generateCoolAlternatives(domain)}`,
  };
- }
- }
-
- export async function ensAgent(context: HandlerContext) {
- if (!process?.env?.OPEN_AI_API_KEY) {
- console.warn("No OPEN_AI_API_KEY found in .env");
- return;
- }
-
- const {
- message: {
- content: { content, params },
- sender,
- },
- group,
- } = context;
-
- try {
- let userPrompt = params?.prompt ?? content;
- const userInfo = await getUserInfo(sender.address);
- if (!userInfo) {
- console.log("User info not found");
- return;
- }
- const { reply } = await textGeneration(
- sender.address,
- userPrompt,
- await ens_agent_prompt(userInfo),
- );
- await processResponseWithSkill(sender.address, reply, context);
- } catch (error) {
- console.error("Error during OpenAI call:", error);
- await context.send("An error occurred while processing your request.");
+ } else {
+ return { code: 400, message: "Command not found." };
  }
  }

@@ -206,6 +175,6 @@ export const generateCoolAlternatives = (domain: string) => {
  };

  export async function clear() {
- clearChatHistories();
+ clearMemory();
  clearInfoCache();
  }
@@ -1,10 +1,39 @@
  import { run, HandlerContext } from "@xmtp/message-kit";
- import { ensAgent } from "./handler/ens.js";
+ import { textGeneration, processMultilineResponse } from "./lib/gpt.js";
+ import { agent_prompt } from "./prompt.js";
+ import { getUserInfo } from "./lib/resolver.js";

  run(async (context: HandlerContext) => {
- /*All the commands are handled through the commands file*/
+ /*All the skills are handled through the skills file*/
  /* If its just text, it will be handled by the ensAgent*/
  /* If its a group message, it will be handled by the groupAgent*/
+ if (!process?.env?.OPEN_AI_API_KEY) {
+ console.warn("No OPEN_AI_API_KEY found in .env");
+ return;
+ }

- ensAgent(context);
+ const {
+ message: {
+ content: { content, params },
+ sender,
+ },
+ } = context;
+
+ try {
+ let userPrompt = params?.prompt ?? content;
+ const userInfo = await getUserInfo(sender.address);
+ if (!userInfo) {
+ console.log("User info not found");
+ return;
+ }
+ const { reply } = await textGeneration(
+ sender.address,
+ userPrompt,
+ await agent_prompt(userInfo),
+ );
+ await processMultilineResponse(sender.address, reply, context);
+ } catch (error) {
+ console.error("Error during OpenAI call:", error);
+ await context.send("An error occurred while processing your request.");
+ }
  });
@@ -0,0 +1,161 @@
+ import "dotenv/config";
+ import type { SkillGroup } from "@xmtp/message-kit";
+ import OpenAI from "openai";
+ const openai = new OpenAI({
+ apiKey: process.env.OPEN_AI_API_KEY,
+ });
+
+ type ChatHistoryEntry = { role: string; content: string };
+ type ChatHistories = Record<string, ChatHistoryEntry[]>;
+ // New ChatMemory class
+ class ChatMemory {
+ private histories: ChatHistories = {};
+
+ getHistory(address: string): ChatHistoryEntry[] {
+ return this.histories[address] || [];
+ }
+
+ addEntry(address: string, entry: ChatHistoryEntry) {
+ if (!this.histories[address]) {
+ this.histories[address] = [];
+ }
+ this.histories[address].push(entry);
+ }
+
+ initializeWithSystem(address: string, systemPrompt: string) {
+ if (this.getHistory(address).length === 0) {
+ this.addEntry(address, {
+ role: "system",
+ content: systemPrompt,
+ });
+ }
+ }
+
+ clear() {
+ this.histories = {};
+ }
+ }
+
+ // Create singleton instance
+ export const chatMemory = new ChatMemory();
+
+ export const clearMemory = () => {
+ chatMemory.clear();
+ };
+
+ export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
+ - You can respond with multiple messages if needed. Each message should be separated by a newline character.
+ - You can trigger skills by only sending the command in a newline message.
+ - Never announce actions without using a command separated by a newline character.
+ - Dont answer in markdown format, just answer in plaintext.
+ - Do not make guesses or assumptions
+ - Only answer if the verified information is in the prompt.
+ - Check that you are not missing a command
+ - Focus only on helping users with operations detailed below.
+ `;
+
+ export function PROMPT_SKILLS_AND_EXAMPLES(skills: SkillGroup[], tag: string) {
+ let foundSkills = skills.filter(
+ (skill) => skill.tag == `@${tag.toLowerCase()}`,
+ );
+ if (!foundSkills.length || !foundSkills[0] || !foundSkills[0].skills)
+ return "";
+ let returnPrompt = `\nCommands:\n${foundSkills[0].skills
+ .map((skill) => skill.command)
+ .join("\n")}\n\nExamples:\n${foundSkills[0].skills
+ .map((skill) => skill.examples)
+ .join("\n")}`;
+ return returnPrompt;
+ }
+
+ export async function textGeneration(
+ memoryKey: string,
+ userPrompt: string,
+ systemPrompt: string,
+ ) {
+ if (!memoryKey) {
+ clearMemory();
+ }
+ let messages = chatMemory.getHistory(memoryKey);
+ chatMemory.initializeWithSystem(memoryKey, systemPrompt);
+ if (messages.length === 0) {
+ messages.push({
+ role: "system",
+ content: systemPrompt,
+ });
+ }
+ messages.push({
+ role: "user",
+ content: userPrompt,
+ });
+ try {
+ const response = await openai.chat.completions.create({
+ model: "gpt-4o",
+ messages: messages as any,
+ });
+ const reply = response.choices[0].message.content;
+ messages.push({
+ role: "assistant",
+ content: reply || "No response from OpenAI.",
+ });
+ const cleanedReply = parseMarkdown(reply as string);
+ chatMemory.addEntry(memoryKey, {
+ role: "assistant",
+ content: cleanedReply,
+ });
+ return { reply: cleanedReply, history: messages };
+ } catch (error) {
+ console.error("Failed to fetch from OpenAI:", error);
+ throw error;
+ }
+ }
+
+ export async function processMultilineResponse(
+ memoryKey: string,
+ reply: string,
+ context: any,
+ ) {
+ if (!memoryKey) {
+ clearMemory();
+ }
+ let messages = reply
+ .split("\n")
+ .map((message: string) => parseMarkdown(message))
+ .filter((message): message is string => message.length > 0);
+
+ console.log(messages);
+ for (const message of messages) {
+ if (message.startsWith("/")) {
+ const response = await context.skill(message);
+ if (response && typeof response.message === "string") {
+ let msg = parseMarkdown(response.message);
+ chatMemory.addEntry(memoryKey, {
+ role: "system",
+ content: msg,
+ });
+ await context.send(response.message);
+ }
+ } else {
+ await context.send(message);
+ }
+ }
+ }
+ export function parseMarkdown(message: string) {
+ let trimmedMessage = message;
+ // Remove bold and underline markdown
+ trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
+ // Remove markdown links, keeping only the URL
+ trimmedMessage = trimmedMessage?.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "$2");
+ // Remove markdown headers
+ trimmedMessage = trimmedMessage?.replace(/^#+\s*(.*)$/gm, "$1");
+ // Remove inline code formatting
+ trimmedMessage = trimmedMessage?.replace(/`([^`]+)`/g, "$1");
+ // Remove single backticks at the start or end of the message
+ trimmedMessage = trimmedMessage?.replace(/^`|`$/g, "");
+ // Remove leading and trailing whitespace
+ trimmedMessage = trimmedMessage?.replace(/^\s+|\s+$/g, "");
+ // Remove any remaining leading or trailing whitespace
+ trimmedMessage = trimmedMessage.trim();
+
+ return trimmedMessage;
+ }
@@ -1,6 +1,6 @@
  import dotenv from "dotenv";
  dotenv.config();
-
+ import type { SkillGroup } from "@xmtp/message-kit";
  import OpenAI from "openai";
  const openai = new OpenAI({
  apiKey: process.env.OPEN_AI_API_KEY,
@@ -9,13 +9,91 @@ const openai = new OpenAI({
  export type ChatHistoryEntry = { role: string; content: string };
  export type ChatHistories = Record<string, ChatHistoryEntry[]>;

+ // New ChatMemory class
+ class ChatMemory {
+ private histories: ChatHistories = {};
+
+ getHistory(address: string): ChatHistoryEntry[] {
+ return this.histories[address] || [];
+ }
+
+ addEntry(address: string, entry: ChatHistoryEntry) {
+ if (!this.histories[address]) {
+ this.histories[address] = [];
+ }
+ this.histories[address].push(entry);
+ }
+
+ initializeWithSystem(address: string, systemPrompt: string) {
+ if (this.getHistory(address).length === 0) {
+ this.addEntry(address, {
+ role: "system",
+ content: systemPrompt,
+ });
+ }
+ }
+
+ clear() {
+ this.histories = {};
+ }
+ }
+
+ export const clearMemory = () => {
+ chatHistories = {};
+ };
+
+ // Create singleton instance
+ export const chatMemory = new ChatMemory();
+
  let chatHistories: ChatHistories = {};
+ export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
+ - You can respond with multiple messages if needed. Each message should be separated by a newline character.
+ - You can trigger skills by only sending the command in a newline message.
+ - Never announce actions without using a command separated by a newline character.
+ - Dont answer in markdown format, just answer in plaintext.
+ - Do not make guesses or assumptions
+ - Only answer if the verified information is in the prompt.
+ - Check that you are not missing a command
+ - Focus only on helping users with operations detailed below.
+ `;
+
+ export const PROMPT_SKILLS_AND_EXAMPLES = (skills: SkillGroup[]) => `
+ Commands:
+ ${skills
+ .map((skill) => skill.skills.map((s) => s.command).join("\n"))
+ .join("\n")}
+
+ Examples:
+ ${skills
+ .map((skill) => skill.skills.map((s) => s.examples).join("\n"))
+ .join("\n")}
+ `;
+
+ export async function agentResponse(
+ sender: { address: string },
+ userPrompt: string,
+ systemPrompt: string,
+ context: any,
+ ) {
+ try {
+ const { reply } = await textGeneration(
+ sender.address,
+ userPrompt,
+ systemPrompt,
+ );
+ await processMultilineResponse(sender.address, reply, context);
+ } catch (error) {
+ console.error("Error during OpenAI call:", error);
+ await context.reply("An error occurred while processing your request.");
+ }
+ }
  export async function textGeneration(
  address: string,
  userPrompt: string,
  systemPrompt: string,
  ) {
- let messages = chatHistories[address] || [];
+ let messages = chatMemory.getHistory(address);
+ chatMemory.initializeWithSystem(address, systemPrompt);
  if (messages.length === 0) {
  messages.push({
  role: "system",
@@ -36,9 +114,11 @@ export async function textGeneration(
  role: "assistant",
  content: reply || "No response from OpenAI.",
  });
- const cleanedReply = responseParser(reply as string);
- chatHistories[address] = messages;
- console.log("messages.length", messages.length);
+ const cleanedReply = parseMarkdown(reply as string);
+ chatMemory.addEntry(address, {
+ role: "assistant",
+ content: cleanedReply,
+ });
  return { reply: cleanedReply, history: messages };
  } catch (error) {
  console.error("Failed to fetch from OpenAI:", error);
@@ -46,68 +126,26 @@ export async function textGeneration(
  }
  }

- // New method to interpret an image
- export async function vision(imageData: Uint8Array, systemPrompt: string) {
- const base64Image = Buffer.from(imageData).toString("base64");
- const dataUrl = `data:image/jpeg;base64,${base64Image}`;
-
- // Create a new thread for each vision request
- const visionMessages = [
- {
- role: "system",
- content: systemPrompt,
- },
- {
- role: "user",
- content: [
- { type: "text", text: systemPrompt },
- {
- type: "image_url",
- image_url: {
- url: dataUrl,
- },
- },
- ],
- },
- ];
-
- try {
- const response = await openai.chat.completions.create({
- model: "gpt-4o",
- messages: visionMessages as any,
- });
- return response.choices[0].message.content;
- } catch (error) {
- console.error("Failed to interpret image with OpenAI:", error);
- throw error;
- }
- }
-
- export async function processResponseWithSkill(
+ export async function processMultilineResponse(
  address: string,
  reply: string,
  context: any,
  ) {
  let messages = reply
  .split("\n")
- .map((message: string) => responseParser(message))
+ .map((message: string) => parseMarkdown(message))
  .filter((message): message is string => message.length > 0);

  console.log(messages);
  for (const message of messages) {
  if (message.startsWith("/")) {
  const response = await context.skill(message);
- if (response && response.message) {
- let msg = responseParser(response.message);
-
- if (!chatHistories[address]) {
- chatHistories[address] = [];
- }
- chatHistories[address].push({
+ if (response && typeof response.message === "string") {
+ let msg = parseMarkdown(response.message);
+ chatMemory.addEntry(address, {
  role: "system",
  content: msg,
  });
-
  await context.send(response.message);
  }
  } else {
@@ -115,7 +153,7 @@ export async function processResponseWithSkill(
  }
  }
  }
- export function responseParser(message: string) {
+ export function parseMarkdown(message: string) {
  let trimmedMessage = message;
  // Remove bold and underline markdown
  trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
@@ -134,7 +172,3 @@ export function responseParser(message: string) {

  return trimmedMessage;
  }
-
- export const clearChatHistories = () => {
- chatHistories = {};
- };
@@ -1,5 +1,6 @@
- import { Client } from "@xmtp/xmtp-js";
+ import type { Client } from "@xmtp/xmtp-js";
  import { isAddress } from "viem";
+ import type { HandlerContext } from "@xmtp/message-kit";

  export const converseEndpointURL =
  "https://converse-website-git-endpoit-ephemerahq.vercel.app";
@@ -16,6 +17,7 @@ export type ConverseProfile = {
  export type UserInfo = {
  ensDomain?: string | undefined;
  address?: string | undefined;
+ preferredName: string | undefined;
  converseUsername?: string | undefined;
  ensInfo?: EnsData | undefined;
  avatar?: string | undefined;
@@ -47,19 +49,20 @@ export const clearInfoCache = () => {
  export const getUserInfo = async (
  key: string,
  clientAddress?: string,
+ context?: HandlerContext,
  ): Promise<UserInfo | null> => {
  let data: UserInfo = infoCache.get(key) || {
  ensDomain: undefined,
  address: undefined,
  converseUsername: undefined,
  ensInfo: undefined,
+ preferredName: undefined,
  };
- //console.log("Getting user info", key, clientAddress);
  if (isAddress(clientAddress || "")) {
  data.address = clientAddress;
  } else if (isAddress(key || "")) {
  data.address = key;
- } else if (key.includes(".eth")) {
+ } else if (key?.includes(".eth")) {
  data.ensDomain = key;
  } else if (key == "@user" || key == "@me" || key == "@bot") {
  data.address = clientAddress;
@@ -74,16 +77,15 @@ export const getUserInfo = async (
  } else {
  data.converseUsername = key;
  }
-
+ data.preferredName = data.ensDomain || data.converseUsername || "Friend";
  let keyToUse = data.address || data.ensDomain || data.converseUsername;
  let cacheData = keyToUse && infoCache.get(keyToUse);
- if (cacheData) {
- //console.log("Getting user info", keyToUse, cacheData);
- return cacheData;
- } else {
- //console.log("Getting user info", keyToUse, data);
- }
+ //console.log("Getting user info", { cacheData, keyToUse, data });
+ if (cacheData) return cacheData;

+ context?.send(
+ "Hey there! Give me a sec while I fetch info about you first...",
+ );
  if (keyToUse?.includes(".eth")) {
  const response = await fetch(`https://ensdata.net/${keyToUse}`);
  const ensData: EnsData = (await response.json()) as EnsData;
@@ -113,6 +115,8 @@ export const getUserInfo = async (
  data.address = converseData?.address || undefined;
  data.avatar = converseData?.avatar || undefined;
  }
+
+ data.preferredName = data.ensDomain || data.converseUsername || "Friend";
  if (data.address) infoCache.set(data.address, data);
  return data;
  };
@@ -124,3 +128,24 @@ export const isOnXMTP = async (
  if (domain == "fabri.eth") return false;
  if (address) return (await client.canMessage([address])).length > 0;
  };
+
+ export const PROMPT_USER_CONTENT = (userInfo: UserInfo) => {
+ let { address, ensDomain, converseUsername, preferredName } = userInfo;
+ let prompt = `
+ User context:
+ - Start by fetch their domain from or Convese username
+ - Call the user by their name or domain, in case they have one
+ - Ask for a name (if they don't have one) so you can suggest domains.
+ - Users address is: ${address}`;
+ if (preferredName) prompt += `\n- Users name is: ${preferredName}`;
+ if (ensDomain) prompt += `\n- User ENS domain is: ${ensDomain}`;
+ if (converseUsername)
+ prompt += `\n- Converse username is: ${converseUsername}`;
+
+ prompt = prompt.replace("{ADDRESS}", address || "");
+ prompt = prompt.replace("{ENS_DOMAIN}", ensDomain || "");
+ prompt = prompt.replace("{CONVERSE_USERNAME}", converseUsername || "");
+ prompt = prompt.replace("{PREFERRED_NAME}", preferredName || "");
+
+ return prompt;
+ };