create-message-kit 1.1.7-beta.12 → 1.1.7-beta.14

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "create-message-kit",
-  "version": "1.1.7-beta.12",
+  "version": "1.1.7-beta.14",
   "license": "MIT",
   "type": "module",
   "main": "index.js",
@@ -4,17 +4,11 @@
   "type": "module",
   "scripts": {
     "build": "tsc",
-    "dev": "tsc -w & sleep 1 && nodemon --quiet dist/index.js",
+    "dev": "tsc -w & sleep 1 && node --watch dist/index.js",
     "start": "node dist/index.js"
   },
   "dependencies": {
-    "@xmtp/message-kit": "workspace:*",
-    "openai": "^4.65.0"
-  },
-  "devDependencies": {
-    "@types/node": "^20.14.2",
-    "nodemon": "^3.1.3",
-    "typescript": "^5.4.5"
+    "@xmtp/message-kit": "workspace:*"
   },
   "engines": {
     "node": ">=20"
@@ -4,17 +4,12 @@
   "type": "module",
   "scripts": {
     "build": "tsc",
-    "dev": "tsc -w & sleep 1 && nodemon --quiet dist/index.js",
+    "dev": "tsc -w & sleep 1 && node --watch dist/index.js",
     "start": "node dist/index.js"
   },
   "dependencies": {
     "@xmtp/message-kit": "workspace:*"
   },
-  "devDependencies": {
-    "@types/node": "^20.14.2",
-    "nodemon": "^3.1.3",
-    "typescript": "^5.4.5"
-  },
   "engines": {
     "node": ">=20"
   }
@@ -4,16 +4,12 @@
   "type": "module",
   "scripts": {
     "build": "tsc",
-    "dev": "tsc -w & sleep 1 && nodemon --quiet dist/index.js",
+    "dev": "tsc -w & sleep 1 && node --watch dist/index.js",
     "start": "node dist/index.js"
   },
   "dependencies": {
     "@stackso/js-core": "^0.3.1",
-    "@xmtp/message-kit": "workspace:^",
-    "openai": "^4.52.0"
-  },
-  "devDependencies": {
-    "nodemon": "^3.1.3"
+    "@xmtp/message-kit": "workspace:*"
   },
   "engines": {
     "node": ">=20"
@@ -6,7 +6,7 @@ run(async (context: HandlerContext) => {
 
   if (!group) {
     context.send(
-      "This This bot only works in group chats. Please add this bot to a group to continue",
+      "This bot only works in group chats. Please add this bot to a group to continue",
     );
   }
 });
@@ -1,161 +0,0 @@
- import "dotenv/config";
- import type { SkillGroup } from "@xmtp/message-kit";
- import OpenAI from "openai";
- const openai = new OpenAI({
-   apiKey: process.env.OPEN_AI_API_KEY,
- });
-
- type ChatHistoryEntry = { role: string; content: string };
- type ChatHistories = Record<string, ChatHistoryEntry[]>;
- // New ChatMemory class
- class ChatMemory {
-   private histories: ChatHistories = {};
-
-   getHistory(address: string): ChatHistoryEntry[] {
-     return this.histories[address] || [];
-   }
-
-   addEntry(address: string, entry: ChatHistoryEntry) {
-     if (!this.histories[address]) {
-       this.histories[address] = [];
-     }
-     this.histories[address].push(entry);
-   }
-
-   initializeWithSystem(address: string, systemPrompt: string) {
-     if (this.getHistory(address).length === 0) {
-       this.addEntry(address, {
-         role: "system",
-         content: systemPrompt,
-       });
-     }
-   }
-
-   clear() {
-     this.histories = {};
-   }
- }
-
- // Create singleton instance
- export const chatMemory = new ChatMemory();
-
- export const clearMemory = () => {
-   chatMemory.clear();
- };
-
- export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
- - You can respond with multiple messages if needed. Each message should be separated by a newline character.
- - You can trigger skills by only sending the command in a newline message.
- - Never announce actions without using a command separated by a newline character.
- - Dont answer in markdown format, just answer in plaintext.
- - Do not make guesses or assumptions
- - Only answer if the verified information is in the prompt.
- - Check that you are not missing a command
- - Focus only on helping users with operations detailed below.
- `;
-
- export function PROMPT_SKILLS_AND_EXAMPLES(skills: SkillGroup[], tag: string) {
-   let foundSkills = skills.filter(
-     (skill) => skill.tag == `@${tag.toLowerCase()}`,
-   );
-   if (!foundSkills.length || !foundSkills[0] || !foundSkills[0].skills)
-     return "";
-   let returnPrompt = `\nCommands:\n${foundSkills[0].skills
-     .map((skill) => skill.command)
-     .join("\n")}\n\nExamples:\n${foundSkills[0].skills
-     .map((skill) => skill.examples)
-     .join("\n")}`;
-   return returnPrompt;
- }
-
- export async function textGeneration(
-   memoryKey: string,
-   userPrompt: string,
-   systemPrompt: string,
- ) {
-   if (!memoryKey) {
-     clearMemory();
-   }
-   let messages = chatMemory.getHistory(memoryKey);
-   chatMemory.initializeWithSystem(memoryKey, systemPrompt);
-   if (messages.length === 0) {
-     messages.push({
-       role: "system",
-       content: systemPrompt,
-     });
-   }
-   messages.push({
-     role: "user",
-     content: userPrompt,
-   });
-   try {
-     const response = await openai.chat.completions.create({
-       model: "gpt-4o",
-       messages: messages as any,
-     });
-     const reply = response.choices[0].message.content;
-     messages.push({
-       role: "assistant",
-       content: reply || "No response from OpenAI.",
-     });
-     const cleanedReply = parseMarkdown(reply as string);
-     chatMemory.addEntry(memoryKey, {
-       role: "assistant",
-       content: cleanedReply,
-     });
-     return { reply: cleanedReply, history: messages };
-   } catch (error) {
-     console.error("Failed to fetch from OpenAI:", error);
-     throw error;
-   }
- }
-
- export async function processMultilineResponse(
-   memoryKey: string,
-   reply: string,
-   context: any,
- ) {
-   if (!memoryKey) {
-     clearMemory();
-   }
-   let messages = reply
-     .split("\n")
-     .map((message: string) => parseMarkdown(message))
-     .filter((message): message is string => message.length > 0);
-
-   console.log(messages);
-   for (const message of messages) {
-     if (message.startsWith("/")) {
-       const response = await context.executeSkill(message);
-       if (response && typeof response.message === "string") {
-         let msg = parseMarkdown(response.message);
-         chatMemory.addEntry(memoryKey, {
-           role: "system",
-           content: msg,
-         });
-         await context.send(response.message);
-       }
-     } else {
-       await context.send(message);
-     }
-   }
- }
- export function parseMarkdown(message: string) {
-   let trimmedMessage = message;
-   // Remove bold and underline markdown
-   trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
-   // Remove markdown links, keeping only the URL
-   trimmedMessage = trimmedMessage?.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "$2");
-   // Remove markdown headers
-   trimmedMessage = trimmedMessage?.replace(/^#+\s*(.*)$/gm, "$1");
-   // Remove inline code formatting
-   trimmedMessage = trimmedMessage?.replace(/`([^`]+)`/g, "$1");
-   // Remove single backticks at the start or end of the message
-   trimmedMessage = trimmedMessage?.replace(/^`|`$/g, "");
-   // Remove leading and trailing whitespace
-   trimmedMessage = trimmedMessage?.replace(/^\s+|\s+$/g, "");
-   // Remove any remaining leading or trailing whitespace
-   trimmedMessage = trimmedMessage.trim();
-
-   return trimmedMessage;
- }
@@ -1,174 +0,0 @@
- import dotenv from "dotenv";
- dotenv.config();
- import type { SkillGroup } from "@xmtp/message-kit";
- import OpenAI from "openai";
- const openai = new OpenAI({
-   apiKey: process.env.OPEN_AI_API_KEY,
- });
-
- export type ChatHistoryEntry = { role: string; content: string };
- export type ChatHistories = Record<string, ChatHistoryEntry[]>;
-
- // New ChatMemory class
- class ChatMemory {
-   private histories: ChatHistories = {};
-
-   getHistory(address: string): ChatHistoryEntry[] {
-     return this.histories[address] || [];
-   }
-
-   addEntry(address: string, entry: ChatHistoryEntry) {
-     if (!this.histories[address]) {
-       this.histories[address] = [];
-     }
-     this.histories[address].push(entry);
-   }
-
-   initializeWithSystem(address: string, systemPrompt: string) {
-     if (this.getHistory(address).length === 0) {
-       this.addEntry(address, {
-         role: "system",
-         content: systemPrompt,
-       });
-     }
-   }
-
-   clear() {
-     this.histories = {};
-   }
- }
-
- export const clearMemory = () => {
-   chatHistories = {};
- };
-
- // Create singleton instance
- export const chatMemory = new ChatMemory();
-
- let chatHistories: ChatHistories = {};
- export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
- - You can respond with multiple messages if needed. Each message should be separated by a newline character.
- - You can trigger skills by only sending the command in a newline message.
- - Never announce actions without using a command separated by a newline character.
- - Dont answer in markdown format, just answer in plaintext.
- - Do not make guesses or assumptions
- - Only answer if the verified information is in the prompt.
- - Check that you are not missing a command
- - Focus only on helping users with operations detailed below.
- `;
-
- export const PROMPT_SKILLS_AND_EXAMPLES = (skills: SkillGroup[]) => `
- Commands:
- ${skills
-   .map((skill) => skill.skills.map((s) => s.command).join("\n"))
-   .join("\n")}
-
- Examples:
- ${skills
-   .map((skill) => skill.skills.map((s) => s.examples).join("\n"))
-   .join("\n")}
- `;
-
- export async function agentResponse(
-   sender: { address: string },
-   userPrompt: string,
-   systemPrompt: string,
-   context: any,
- ) {
-   try {
-     const { reply } = await textGeneration(
-       sender.address,
-       userPrompt,
-       systemPrompt,
-     );
-     await processMultilineResponse(sender.address, reply, context);
-   } catch (error) {
-     console.error("Error during OpenAI call:", error);
-     await context.reply("An error occurred while processing your request.");
-   }
- }
- export async function textGeneration(
-   address: string,
-   userPrompt: string,
-   systemPrompt: string,
- ) {
-   let messages = chatMemory.getHistory(address);
-   chatMemory.initializeWithSystem(address, systemPrompt);
-   if (messages.length === 0) {
-     messages.push({
-       role: "system",
-       content: systemPrompt,
-     });
-   }
-   messages.push({
-     role: "user",
-     content: userPrompt,
-   });
-   try {
-     const response = await openai.chat.completions.create({
-       model: "gpt-4o",
-       messages: messages as any,
-     });
-     const reply = response.choices[0].message.content;
-     messages.push({
-       role: "assistant",
-       content: reply || "No response from OpenAI.",
-     });
-     const cleanedReply = parseMarkdown(reply as string);
-     chatMemory.addEntry(address, {
-       role: "assistant",
-       content: cleanedReply,
-     });
-     return { reply: cleanedReply, history: messages };
-   } catch (error) {
-     console.error("Failed to fetch from OpenAI:", error);
-     throw error;
-   }
- }
-
- export async function processMultilineResponse(
-   address: string,
-   reply: string,
-   context: any,
- ) {
-   let messages = reply
-     .split("\n")
-     .map((message: string) => parseMarkdown(message))
-     .filter((message): message is string => message.length > 0);
-
-   console.log(messages);
-   for (const message of messages) {
-     if (message.startsWith("/")) {
-       const response = await context.skill(message);
-       if (response && typeof response.message === "string") {
-         let msg = parseMarkdown(response.message);
-         chatMemory.addEntry(address, {
-           role: "system",
-           content: msg,
-         });
-         await context.send(response.message);
-       }
-     } else {
-       await context.send(message);
-     }
-   }
- }
- export function parseMarkdown(message: string) {
-   let trimmedMessage = message;
-   // Remove bold and underline markdown
-   trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
-   // Remove markdown links, keeping only the URL
-   trimmedMessage = trimmedMessage?.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "$2");
-   // Remove markdown headers
-   trimmedMessage = trimmedMessage?.replace(/^#+\s*(.*)$/gm, "$1");
-   // Remove inline code formatting
-   trimmedMessage = trimmedMessage?.replace(/`([^`]+)`/g, "$1");
-   // Remove single backticks at the start or end of the message
-   trimmedMessage = trimmedMessage?.replace(/^`|`$/g, "");
-   // Remove leading and trailing whitespace
-   trimmedMessage = trimmedMessage?.replace(/^\s+|\s+$/g, "");
-   // Remove any remaining leading or trailing whitespace
-   trimmedMessage = trimmedMessage.trim();
-
-   return trimmedMessage;
- }
@@ -1,151 +0,0 @@
- import type { Client } from "@xmtp/xmtp-js";
- import { isAddress } from "viem";
- import type { HandlerContext } from "@xmtp/message-kit";
-
- export const converseEndpointURL =
-   "https://converse-website-git-endpoit-ephemerahq.vercel.app";
- //export const converseEndpointURL = "http://localhost:3000";
-
- export type InfoCache = Map<string, UserInfo>;
- export type ConverseProfile = {
-   address: string | null;
-   onXmtp: boolean;
-   avatar: string | null;
-   formattedName: string | null;
-   name: string | null;
- };
- export type UserInfo = {
-   ensDomain?: string | undefined;
-   address?: string | undefined;
-   preferredName: string | undefined;
-   converseUsername?: string | undefined;
-   ensInfo?: EnsData | undefined;
-   avatar?: string | undefined;
- };
- export interface EnsData {
-   address?: string;
-   avatar?: string;
-   avatar_small?: string;
-   converse?: string;
-   avatar_url?: string;
-   contentHash?: string;
-   description?: string;
-   ens?: string;
-   ens_primary?: string;
-   github?: string;
-   resolverAddress?: string;
-   twitter?: string;
-   url?: string;
-   wallets?: {
-     eth?: string;
-   };
- }
-
- let infoCache: InfoCache = new Map();
-
- export const clearInfoCache = () => {
-   infoCache.clear();
- };
- export const getUserInfo = async (
-   key: string,
-   clientAddress?: string,
-   context?: HandlerContext,
- ): Promise<UserInfo | null> => {
-   let data: UserInfo = infoCache.get(key) || {
-     ensDomain: undefined,
-     address: undefined,
-     converseUsername: undefined,
-     ensInfo: undefined,
-     preferredName: undefined,
-   };
-   if (isAddress(clientAddress || "")) {
-     data.address = clientAddress;
-   } else if (isAddress(key || "")) {
-     data.address = key;
-   } else if (key?.includes(".eth")) {
-     data.ensDomain = key;
-   } else if (key == "@user" || key == "@me" || key == "@bot") {
-     data.address = clientAddress;
-     data.ensDomain = key.replace("@", "") + ".eth";
-     data.converseUsername = key.replace("@", "");
-   } else if (key == "@alix") {
-     data.address = "0x3a044b218BaE80E5b9E16609443A192129A67BeA";
-     data.converseUsername = "alix";
-   } else if (key == "@bo") {
-     data.address = "0xbc3246461ab5e1682baE48fa95172CDf0689201a";
-     data.converseUsername = "bo";
-   } else {
-     data.converseUsername = key;
-   }
-   data.preferredName = data.ensDomain || data.converseUsername || "Friend";
-   let keyToUse = data.address || data.ensDomain || data.converseUsername;
-   let cacheData = keyToUse && infoCache.get(keyToUse);
-   //console.log("Getting user info", { cacheData, keyToUse, data });
-   if (cacheData) return cacheData;
-
-   context?.send(
-     "Hey there! Give me a sec while I fetch info about you first...",
-   );
-   if (keyToUse?.includes(".eth")) {
-     const response = await fetch(`https://ensdata.net/${keyToUse}`);
-     const ensData: EnsData = (await response.json()) as EnsData;
-     //console.log("Ens data", ensData);
-     if (ensData) {
-       data.ensInfo = ensData;
-       data.ensDomain = ensData?.ens;
-       data.address = ensData?.address;
-     }
-   } else if (keyToUse) {
-     keyToUse = keyToUse.replace("@", "");
-     const response = await fetch(`${converseEndpointURL}/profile/${keyToUse}`, {
-       method: "POST",
-       headers: {
-         "Content-Type": "application/json",
-         Accept: "application/json",
-       },
-       body: JSON.stringify({
-         peer: keyToUse,
-       }),
-     });
-     const converseData = (await response.json()) as ConverseProfile;
-     //if (process.env.MSG_LOG === "true")
-     //console.log("Converse data", keyToUse, converseData);
-     data.converseUsername =
-       converseData?.formattedName || converseData?.name || undefined;
-     data.address = converseData?.address || undefined;
-     data.avatar = converseData?.avatar || undefined;
-   }
-
-   data.preferredName = data.ensDomain || data.converseUsername || "Friend";
-   if (data.address) infoCache.set(data.address, data);
-   return data;
- };
- export const isOnXMTP = async (
-   client: Client,
-   domain: string | undefined,
-   address: string | undefined,
- ) => {
-   if (domain == "fabri.eth") return false;
-   if (address) return (await client.canMessage([address])).length > 0;
- };
-
- export const PROMPT_USER_CONTENT = (userInfo: UserInfo) => {
-   let { address, ensDomain, converseUsername, preferredName } = userInfo;
-   let prompt = `
- User context:
- - Start by fetch their domain from or Convese username
- - Call the user by their name or domain, in case they have one
- - Ask for a name (if they don't have one) so you can suggest domains.
- - Users address is: ${address}`;
-   if (preferredName) prompt += `\n- Users name is: ${preferredName}`;
-   if (ensDomain) prompt += `\n- User ENS domain is: ${ensDomain}`;
-   if (converseUsername)
-     prompt += `\n- Converse username is: ${converseUsername}`;
-
-   prompt = prompt.replace("{ADDRESS}", address || "");
-   prompt = prompt.replace("{ENS_DOMAIN}", ensDomain || "");
-   prompt = prompt.replace("{CONVERSE_USERNAME}", converseUsername || "");
-   prompt = prompt.replace("{PREFERRED_NAME}", preferredName || "");
-
-   return prompt;
- };
@@ -1,24 +0,0 @@
- import { HandlerContext } from "@xmtp/message-kit";
-
- export async function handler(context: HandlerContext) {
-   const {
-     skills,
-     group,
-     message: {
-       content: { command },
-     },
-   } = context;
-
-   if (command == "help") {
-     const intro =
-       "Available experiences:\n" +
-       skills
-         ?.flatMap((app) => app.skills)
-         .map((skill) => `${skill.command} - ${skill.description}`)
-         .join("\n") +
-       "\nUse these skills to interact with specific apps.";
-     context.send(intro);
-   } else if (command == "id") {
-     context.send(context.group?.id);
-   }
- }
@@ -1,65 +0,0 @@
- import { HandlerContext } from "@xmtp/message-kit";
- import { textGeneration } from "../lib/gpt.js";
- import { vision } from "../lib/vision.js";
- import { getUserInfo } from "../lib/resolver.js";
-
- export async function handler(context: HandlerContext) {
-   if (!process?.env?.OPEN_AI_API_KEY) {
-     console.warn("No OPEN_AI_API_KEY found in .env");
-     return;
-   }
-   const {
-     members,
-     skill,
-     message: {
-       typeId,
-       content: { attachment },
-       sender,
-     },
-   } = context;
-
-   if (!members) {
-     return;
-   }
-   let senderInfo = await getUserInfo(sender.address);
-   if (attachment && typeId === "remoteStaticAttachment") {
-     const { data } = attachment;
-     const response = await vision(
-       data,
-       "This image is the bill of a restaurant dinner. Return the total. If you can't find the total, return 'undefined'.",
-     );
-     if (response?.includes("undefined")) {
-       return;
-     } else {
-       context.reply(
-         "You uploaded a new bill. Let's go ahead and split the bill.",
-       );
-     }
-     if (response) {
-       const prompt = `You a split wise agent that splits the bill between the members of this group except for the sender and bot.\n
-
-       ## Instructions:
-       When you receive the totals you should split the bill between the members of the group and send to each one a transaction frame
-       - For the sake of this demo. Only send the payment to the sender not to all the other members.
-
-       ### Return message
-       Don't use markdown. Return messages in a json object The first message detailing the split. The second one you will send the command for the receiver to pay directly to the sender.
-       Example:
-       [
-         "This are the details: Total: $49.52. Tip (20%): $9.90",
-         "All users owe X USDC to @${senderInfo?.converseUsername}. Pay here:",
-         "/send @${senderInfo?.converseUsername} $9.90"
-       ]
-       `;
-
-       //I want the reply to be an array of messages so the bot feels like is sending multuple ones
-       const { reply } = await textGeneration(sender.address, response, prompt);
-       let splitMessages = JSON.parse(reply);
-       for (const message of splitMessages) {
-         let msg = message as string;
-         if (msg.startsWith("/")) await skill(msg);
-         else await context.send(msg);
-       }
-     }
-   }
- }
@@ -1,161 +0,0 @@
- import "dotenv/config";
- import type { SkillGroup } from "@xmtp/message-kit";
- import OpenAI from "openai";
- const openai = new OpenAI({
-   apiKey: process.env.OPEN_AI_API_KEY,
- });
-
- type ChatHistoryEntry = { role: string; content: string };
- type ChatHistories = Record<string, ChatHistoryEntry[]>;
- // New ChatMemory class
- class ChatMemory {
-   private histories: ChatHistories = {};
-
-   getHistory(address: string): ChatHistoryEntry[] {
-     return this.histories[address] || [];
-   }
-
-   addEntry(address: string, entry: ChatHistoryEntry) {
-     if (!this.histories[address]) {
-       this.histories[address] = [];
-     }
-     this.histories[address].push(entry);
-   }
-
-   initializeWithSystem(address: string, systemPrompt: string) {
-     if (this.getHistory(address).length === 0) {
-       this.addEntry(address, {
-         role: "system",
-         content: systemPrompt,
-       });
-     }
-   }
-
-   clear() {
-     this.histories = {};
-   }
- }
-
- // Create singleton instance
- export const chatMemory = new ChatMemory();
-
- export const clearMemory = () => {
-   chatMemory.clear();
- };
-
- export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
- - You can respond with multiple messages if needed. Each message should be separated by a newline character.
- - You can trigger skills by only sending the command in a newline message.
- - Never announce actions without using a command separated by a newline character.
- - Dont answer in markdown format, just answer in plaintext.
- - Do not make guesses or assumptions
- - Only answer if the verified information is in the prompt.
- - Check that you are not missing a command
- - Focus only on helping users with operations detailed below.
- `;
-
- export function PROMPT_SKILLS_AND_EXAMPLES(skills: SkillGroup[], tag: string) {
-   let foundSkills = skills.filter(
-     (skill) => skill.tag == `@${tag.toLowerCase()}`,
-   );
-   if (!foundSkills.length || !foundSkills[0] || !foundSkills[0].skills)
-     return "";
-   let returnPrompt = `\nCommands:\n${foundSkills[0].skills
-     .map((skill) => skill.command)
-     .join("\n")}\n\nExamples:\n${foundSkills[0].skills
-     .map((skill) => skill.examples)
-     .join("\n")}`;
-   return returnPrompt;
- }
-
- export async function textGeneration(
-   memoryKey: string,
-   userPrompt: string,
-   systemPrompt: string,
- ) {
-   if (!memoryKey) {
-     clearMemory();
-   }
-   let messages = chatMemory.getHistory(memoryKey);
-   chatMemory.initializeWithSystem(memoryKey, systemPrompt);
-   if (messages.length === 0) {
-     messages.push({
-       role: "system",
-       content: systemPrompt,
-     });
-   }
-   messages.push({
-     role: "user",
-     content: userPrompt,
-   });
-   try {
-     const response = await openai.chat.completions.create({
-       model: "gpt-4o",
-       messages: messages as any,
-     });
-     const reply = response.choices[0].message.content;
-     messages.push({
-       role: "assistant",
-       content: reply || "No response from OpenAI.",
-     });
-     const cleanedReply = parseMarkdown(reply as string);
-     chatMemory.addEntry(memoryKey, {
-       role: "assistant",
-       content: cleanedReply,
-     });
-     return { reply: cleanedReply, history: messages };
-   } catch (error) {
-     console.error("Failed to fetch from OpenAI:", error);
-     throw error;
-   }
- }
-
- export async function processMultilineResponse(
-   memoryKey: string,
-   reply: string,
-   context: any,
- ) {
-   if (!memoryKey) {
-     clearMemory();
-   }
-   let messages = reply
-     .split("\n")
-     .map((message: string) => parseMarkdown(message))
-     .filter((message): message is string => message.length > 0);
-
-   console.log(messages);
-   for (const message of messages) {
-     if (message.startsWith("/")) {
-       const response = await context.executeSkill(message);
-       if (response && typeof response.message === "string") {
-         let msg = parseMarkdown(response.message);
-         chatMemory.addEntry(memoryKey, {
-           role: "system",
-           content: msg,
-         });
-         await context.send(response.message);
-       }
-     } else {
-       await context.send(message);
-     }
-   }
- }
- export function parseMarkdown(message: string) {
-   let trimmedMessage = message;
-   // Remove bold and underline markdown
-   trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
-   // Remove markdown links, keeping only the URL
-   trimmedMessage = trimmedMessage?.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "$2");
-   // Remove markdown headers
-   trimmedMessage = trimmedMessage?.replace(/^#+\s*(.*)$/gm, "$1");
-   // Remove inline code formatting
-   trimmedMessage = trimmedMessage?.replace(/`([^`]+)`/g, "$1");
-   // Remove single backticks at the start or end of the message
-   trimmedMessage = trimmedMessage?.replace(/^`|`$/g, "");
-   // Remove leading and trailing whitespace
-   trimmedMessage = trimmedMessage?.replace(/^\s+|\s+$/g, "");
-   // Remove any remaining leading or trailing whitespace
-   trimmedMessage = trimmedMessage.trim();
-
-   return trimmedMessage;
- }
@@ -1,174 +0,0 @@
- import dotenv from "dotenv";
- dotenv.config();
- import type { SkillGroup } from "@xmtp/message-kit";
- import OpenAI from "openai";
- const openai = new OpenAI({
-   apiKey: process.env.OPEN_AI_API_KEY,
- });
-
- export type ChatHistoryEntry = { role: string; content: string };
- export type ChatHistories = Record<string, ChatHistoryEntry[]>;
-
- // New ChatMemory class
- class ChatMemory {
-   private histories: ChatHistories = {};
-
-   getHistory(address: string): ChatHistoryEntry[] {
-     return this.histories[address] || [];
-   }
-
-   addEntry(address: string, entry: ChatHistoryEntry) {
-     if (!this.histories[address]) {
-       this.histories[address] = [];
-     }
-     this.histories[address].push(entry);
-   }
-
-   initializeWithSystem(address: string, systemPrompt: string) {
-     if (this.getHistory(address).length === 0) {
-       this.addEntry(address, {
-         role: "system",
-         content: systemPrompt,
-       });
-     }
-   }
-
-   clear() {
-     this.histories = {};
-   }
- }
-
- export const clearMemory = () => {
-   chatHistories = {};
- };
-
- // Create singleton instance
- export const chatMemory = new ChatMemory();
-
- let chatHistories: ChatHistories = {};
- export const PROMPT_RULES = `You are a helpful and playful agent called {NAME} that lives inside a web3 messaging app called Converse.
- - You can respond with multiple messages if needed. Each message should be separated by a newline character.
- - You can trigger skills by only sending the command in a newline message.
- - Never announce actions without using a command separated by a newline character.
- - Dont answer in markdown format, just answer in plaintext.
- - Do not make guesses or assumptions
- - Only answer if the verified information is in the prompt.
- - Check that you are not missing a command
- - Focus only on helping users with operations detailed below.
- `;
-
- export const PROMPT_SKILLS_AND_EXAMPLES = (skills: SkillGroup[]) => `
- Commands:
- ${skills
-   .map((skill) => skill.skills.map((s) => s.command).join("\n"))
-   .join("\n")}
-
- Examples:
- ${skills
-   .map((skill) => skill.skills.map((s) => s.examples).join("\n"))
-   .join("\n")}
- `;
-
- export async function agentResponse(
-   sender: { address: string },
-   userPrompt: string,
-   systemPrompt: string,
-   context: any,
- ) {
-   try {
-     const { reply } = await textGeneration(
-       sender.address,
-       userPrompt,
-       systemPrompt,
-     );
-     await processMultilineResponse(sender.address, reply, context);
-   } catch (error) {
-     console.error("Error during OpenAI call:", error);
-     await context.reply("An error occurred while processing your request.");
-   }
- }
- export async function textGeneration(
-   address: string,
-   userPrompt: string,
-   systemPrompt: string,
- ) {
-   let messages = chatMemory.getHistory(address);
-   chatMemory.initializeWithSystem(address, systemPrompt);
-   if (messages.length === 0) {
-     messages.push({
-       role: "system",
-       content: systemPrompt,
-     });
-   }
-   messages.push({
-     role: "user",
-     content: userPrompt,
-   });
-   try {
-     const response = await openai.chat.completions.create({
-       model: "gpt-4o",
-       messages: messages as any,
-     });
-     const reply = response.choices[0].message.content;
-     messages.push({
-       role: "assistant",
-       content: reply || "No response from OpenAI.",
-     });
-     const cleanedReply = parseMarkdown(reply as string);
-     chatMemory.addEntry(address, {
-       role: "assistant",
-       content: cleanedReply,
-     });
-     return { reply: cleanedReply, history: messages };
-   } catch (error) {
-     console.error("Failed to fetch from OpenAI:", error);
-     throw error;
-   }
- }
-
- export async function processMultilineResponse(
-   address: string,
-   reply: string,
-   context: any,
- ) {
-   let messages = reply
-     .split("\n")
-     .map((message: string) => parseMarkdown(message))
-     .filter((message): message is string => message.length > 0);
-
-   console.log(messages);
-   for (const message of messages) {
-     if (message.startsWith("/")) {
-       const response = await context.skill(message);
-       if (response && typeof response.message === "string") {
-         let msg = parseMarkdown(response.message);
-         chatMemory.addEntry(address, {
-           role: "system",
-           content: msg,
-         });
-         await context.send(response.message);
-       }
-     } else {
-       await context.send(message);
-     }
-   }
- }
- export function parseMarkdown(message: string) {
-   let trimmedMessage = message;
-   // Remove bold and underline markdown
-   trimmedMessage = trimmedMessage?.replace(/(\*\*|__)(.*?)\1/g, "$2");
-   // Remove markdown links, keeping only the URL
-   trimmedMessage = trimmedMessage?.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "$2");
-   // Remove markdown headers
-   trimmedMessage = trimmedMessage?.replace(/^#+\s*(.*)$/gm, "$1");
-   // Remove inline code formatting
-   trimmedMessage = trimmedMessage?.replace(/`([^`]+)`/g, "$1");
-   // Remove single backticks at the start or end of the message
-   trimmedMessage = trimmedMessage?.replace(/^`|`$/g, "");
-   // Remove leading and trailing whitespace
-   trimmedMessage = trimmedMessage?.replace(/^\s+|\s+$/g, "");
-   // Remove any remaining leading or trailing whitespace
-   trimmedMessage = trimmedMessage.trim();
-
-   return trimmedMessage;
- }
@@ -1,151 +0,0 @@
- import type { Client } from "@xmtp/xmtp-js";
- import { isAddress } from "viem";
- import type { HandlerContext } from "@xmtp/message-kit";
-
- export const converseEndpointURL =
-   "https://converse-website-git-endpoit-ephemerahq.vercel.app";
- //export const converseEndpointURL = "http://localhost:3000";
-
- export type InfoCache = Map<string, UserInfo>;
- export type ConverseProfile = {
-   address: string | null;
-   onXmtp: boolean;
-   avatar: string | null;
-   formattedName: string | null;
-   name: string | null;
- };
- export type UserInfo = {
-   ensDomain?: string | undefined;
-   address?: string | undefined;
-   preferredName: string | undefined;
-   converseUsername?: string | undefined;
-   ensInfo?: EnsData | undefined;
-   avatar?: string | undefined;
- };
- export interface EnsData {
-   address?: string;
-   avatar?: string;
-   avatar_small?: string;
-   converse?: string;
-   avatar_url?: string;
-   contentHash?: string;
-   description?: string;
-   ens?: string;
-   ens_primary?: string;
-   github?: string;
-   resolverAddress?: string;
-   twitter?: string;
-   url?: string;
-   wallets?: {
-     eth?: string;
-   };
- }
-
- let infoCache: InfoCache = new Map();
-
- export const clearInfoCache = () => {
-   infoCache.clear();
- };
- export const getUserInfo = async (
-   key: string,
-   clientAddress?: string,
-   context?: HandlerContext,
- ): Promise<UserInfo | null> => {
-   let data: UserInfo = infoCache.get(key) || {
-     ensDomain: undefined,
-     address: undefined,
-     converseUsername: undefined,
-     ensInfo: undefined,
-     preferredName: undefined,
-   };
-   if (isAddress(clientAddress || "")) {
-     data.address = clientAddress;
-   } else if (isAddress(key || "")) {
-     data.address = key;
-   } else if (key?.includes(".eth")) {
-     data.ensDomain = key;
-   } else if (key == "@user" || key == "@me" || key == "@bot") {
-     data.address = clientAddress;
-     data.ensDomain = key.replace("@", "") + ".eth";
-     data.converseUsername = key.replace("@", "");
-   } else if (key == "@alix") {
-     data.address = "0x3a044b218BaE80E5b9E16609443A192129A67BeA";
-     data.converseUsername = "alix";
-   } else if (key == "@bo") {
-     data.address = "0xbc3246461ab5e1682baE48fa95172CDf0689201a";
-     data.converseUsername = "bo";
-   } else {
-     data.converseUsername = key;
-   }
-   data.preferredName = data.ensDomain || data.converseUsername || "Friend";
-   let keyToUse = data.address || data.ensDomain || data.converseUsername;
-   let cacheData = keyToUse && infoCache.get(keyToUse);
-   //console.log("Getting user info", { cacheData, keyToUse, data });
-   if (cacheData) return cacheData;
-
-   context?.send(
-     "Hey there! Give me a sec while I fetch info about you first...",
-   );
-   if (keyToUse?.includes(".eth")) {
-     const response = await fetch(`https://ensdata.net/${keyToUse}`);
-     const ensData: EnsData = (await response.json()) as EnsData;
-     //console.log("Ens data", ensData);
-     if (ensData) {
-       data.ensInfo = ensData;
-       data.ensDomain = ensData?.ens;
-       data.address = ensData?.address;
-     }
-   } else if (keyToUse) {
-     keyToUse = keyToUse.replace("@", "");
-     const response = await fetch(`${converseEndpointURL}/profile/${keyToUse}`, {
-       method: "POST",
-       headers: {
-         "Content-Type": "application/json",
-         Accept: "application/json",
-       },
-       body: JSON.stringify({
-         peer: keyToUse,
-       }),
-     });
-     const converseData = (await response.json()) as ConverseProfile;
-     /// if (process.env.MSG_LOG === "true")
-     //console.log("Converse data", keyToUse, converseData);
-     data.converseUsername =
-       converseData?.formattedName || converseData?.name || undefined;
-     data.address = converseData?.address || undefined;
-     data.avatar = converseData?.avatar || undefined;
-   }
-
-   data.preferredName = data.ensDomain || data.converseUsername || "Friend";
-   if (data.address) infoCache.set(data.address, data);
-   return data;
- };
- export const isOnXMTP = async (
-   client: Client,
-   domain: string | undefined,
-   address: string | undefined,
- ) => {
-   if (domain == "fabri.eth") return false;
-   if (address) return (await client.canMessage([address])).length > 0;
- };
-
- export const PROMPT_USER_CONTENT = (userInfo: UserInfo) => {
-   let { address, ensDomain, converseUsername, preferredName } = userInfo;
-   let prompt = `
- User context:
- - Start by fetch their domain from or Convese username
- - Call the user by their name or domain, in case they have one
- - Ask for a name (if they don't have one) so you can suggest domains.
- - Users address is: ${address}`;
-   if (preferredName) prompt += `\n- Users name is: ${preferredName}`;
-   if (ensDomain) prompt += `\n- User ENS domain is: ${ensDomain}`;
-   if (converseUsername)
-     prompt += `\n- Converse username is: ${converseUsername}`;
-
-   prompt = prompt.replace("{ADDRESS}", address || "");
-   prompt = prompt.replace("{ENS_DOMAIN}", ensDomain || "");
-   prompt = prompt.replace("{CONVERSE_USERNAME}", converseUsername || "");
-   prompt = prompt.replace("{PREFERRED_NAME}", preferredName || "");
-
-   return prompt;
- };
@@ -1,42 +0,0 @@
- import "dotenv/config";
-
- import OpenAI from "openai";
- const openai = new OpenAI({
-   apiKey: process.env.OPEN_AI_API_KEY,
- });
-
- export async function vision(imageData: Uint8Array, systemPrompt: string) {
-   const base64Image = Buffer.from(imageData).toString("base64");
-   const dataUrl = `data:image/jpeg;base64,${base64Image}`;
-
-   // Create a new thread for each vision request
-   const visionMessages = [
-     {
-       role: "system",
-       content: systemPrompt,
-     },
-     {
-       role: "user",
-       content: [
-         { type: "text", text: systemPrompt },
-         {
-           type: "image_url",
-           image_url: {
-             url: dataUrl,
-           },
-         },
-       ],
-     },
-   ];
-
-   try {
-     const response = await openai.chat.completions.create({
-       model: "gpt-4o",
-       messages: visionMessages as any,
-     });
-     return response.choices[0].message.content;
-   } catch (error) {
-     console.error("Failed to interpret image with OpenAI:", error);
-     throw error;
-   }
- }