@sprucelabs/sprucebot-llm 9.0.102 → 9.0.104
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +144 -50
- package/build/chat.js +11 -1
- package/build/esm/chat.js +11 -1
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,9 +1,9 @@
|
|
|
1
|
-
# sprucebot-llm
|
|
1
|
+
# sprucebot-llm
|
|
2
2
|
A Typescript library for leveraging Large Language Models (like GPT-3) to do... anything!
|
|
3
3
|
|
|
4
|
-
* Has memory
|
|
4
|
+
* [Has memory](#memory)
|
|
5
5
|
* Remembers past messages to build context
|
|
6
|
-
* Configure how much of the conversation your bot should remember
|
|
6
|
+
* Configure how much of the conversation your bot should remember
|
|
7
7
|
* [Manages state](#adding-state-to-your-conversation)
|
|
8
8
|
* The state builds as the conversation continues
|
|
9
9
|
* Invoke callbacks whenever state changes
|
|
@@ -14,71 +14,105 @@ A Typescript library for leveraging Large Langage Models (like GPT-3) to do... a
|
|
|
14
14
|
* Skill architecture for extensibility
|
|
15
15
|
* Leverage Skills to get your bot to complete any task!
|
|
16
16
|
* Adapter Interface to create your own adapters
|
|
17
|
-
* Only support
|
|
17
|
+
* Only supports OpenAI models for now (more adapters based on demand)
|
|
18
18
|
* Fully typed
|
|
19
19
|
|
|
20
20
|
|
|
21
|
-
*In progress
|
|
22
21
|
## Getting started
|
|
23
22
|
|
|
24
|
-
### Install the library
|
|
23
|
+
### Install the library as a dependency
|
|
25
24
|
|
|
26
25
|
```bash
|
|
27
26
|
yarn add @sprucelabs/sprucebot-llm
|
|
28
27
|
```
|
|
29
28
|
|
|
30
|
-
|
|
31
|
-
|
|
29
|
+
```bash
|
|
30
|
+
npm install @sprucelabs/sprucebot-llm
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
### Cloning and testing
|
|
32
34
|
|
|
33
|
-
|
|
35
|
+
To clone the repository and prepare for development, do the following:
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
git clone https://github.com/sprucelabsai/sprucebot-llm.git
|
|
39
|
+
cd sprucebot-llm
|
|
40
|
+
yarn rebuild
|
|
41
|
+
code .
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### Testing it out
|
|
45
|
+
You can use `sprucebot-llm` inside any Javascript runtime (nodejs, bun, browser).
|
|
46
|
+
|
|
47
|
+
If you want to try this locally, you can checkout `chat.ts`. Here are the contents of that file for you to explore.
|
|
34
48
|
|
|
35
49
|
```ts
|
|
36
|
-
import { stdin as input, stdout as output } from 'node:process'
|
|
50
|
+
import { stdin as input, stdout as output } from 'node:process'
|
|
37
51
|
import * as readline from 'node:readline/promises'
|
|
38
52
|
import dotenv from 'dotenv'
|
|
39
|
-
import
|
|
53
|
+
import OpenAiAdapter from './bots/adapters/OpenAi'
|
|
40
54
|
import SprucebotLlmFactory from './bots/SprucebotLlmFactory'
|
|
41
|
-
|
|
55
|
+
import buildCallbackSkill from './examples/buildCallbackSkill'
|
|
56
|
+
import buildFileTransformerSkill from './examples/buildFileTransformerSkill'
|
|
57
|
+
import buildJokeSkill from './examples/buildJokeSkill'
|
|
58
|
+
import buildProfileSkill from './examples/buildProfileSkill'
|
|
59
|
+
import buildReceptionistSkill from './examples/buildReceptionistSkill'
|
|
42
60
|
dotenv.config({ quiet: true })
|
|
43
|
-
const rl = readline.createInterface({ input, output })
|
|
44
61
|
|
|
45
|
-
|
|
46
|
-
console.clear()
|
|
47
|
-
|
|
48
|
-
const adapter = new OpenAi(process.env.OPEN_AI_API_KEY!)
|
|
49
|
-
const bots = SprucebotLlmFactory.Factory()
|
|
50
|
-
|
|
51
|
-
const skill = bots.Skill({
|
|
52
|
-
yourJobIfYouChooseToAcceptItIs: 'to tell knock knock jokes!',
|
|
53
|
-
pleaseKeepInMindThat: [
|
|
54
|
-
'our audience is younger, so keep it PG!',
|
|
55
|
-
'you should never laugh when someone does not get the joke.',
|
|
56
|
-
"after each joke, you should tell me how many jokes you have left to tell before we're done.",
|
|
57
|
-
'you should acknowledge if someone laughs at your joke by saying "Thanks!" or "Glad you thought that was funny"!',
|
|
58
|
-
],
|
|
59
|
-
weAreDoneWhen: 'you have told 3 jokes!',
|
|
60
|
-
})
|
|
62
|
+
const rl = readline.createInterface({ input, output })
|
|
61
63
|
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
64
|
+
void (async () => {
|
|
65
|
+
console.clear()
|
|
66
|
+
|
|
67
|
+
// Create the adapter that handles actually sending the prompt to an LLM
|
|
68
|
+
const adapter = OpenAiAdapter.Adapter(process.env.OPEN_AI_API_KEY!)
|
|
69
|
+
|
|
70
|
+
// The LlmFactory is a layer of abstraction that simplifies bot creation
|
|
71
|
+
// and enables test doubling (mocks, spies, etc)
|
|
72
|
+
const bots = SprucebotLlmFactory.Factory(adapter)
|
|
73
|
+
|
|
74
|
+
// Different examples of things you may want to play with (see line 34)
|
|
75
|
+
const skills = {
|
|
76
|
+
jokes: buildJokeSkill(bots),
|
|
77
|
+
profile: buildProfileSkill(bots),
|
|
78
|
+
callbacks: buildCallbackSkill(bots),
|
|
79
|
+
fileTransformer: buildFileTransformerSkill(bots),
|
|
80
|
+
receptionist: buildReceptionistSkill(bots),
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
// Construct a Bot instance and pass the skill of your choice
|
|
84
|
+
const bot = bots.Bot({
|
|
85
|
+
skill: skills.callbacks, //<-- try jokes, profile, etc.
|
|
86
|
+
youAre: "a bot named Sprucebot that is in test mode. At the start of every conversation, you introduce yourself and announce that you are in test mode so I don't get confused! You are both hip and adorable. You say things like, 'Jeepers' and 'Golly' or even 'Jeezey peezy'!",
|
|
87
|
+
})
|
|
88
|
+
|
|
89
|
+
do {
|
|
90
|
+
// Read from stdin
|
|
91
|
+
const input = await rl.question('Message > ')
|
|
92
|
+
|
|
93
|
+
// Send the message to the bot and log the response
|
|
94
|
+
// We use a callback because one message may trigger a conversation
|
|
95
|
+
// which will include many messages and bot.sendMessage(...) only
|
|
96
|
+
// returns the last message sent back from the LLM
|
|
97
|
+
await bot.sendMessage(input, (message) => console.log('>', message))
|
|
98
|
+
} while (!bot.getIsDone())
|
|
99
|
+
|
|
100
|
+
console.log('Signing off...')
|
|
101
|
+
rl.close()
|
|
102
|
+
})()
|
|
68
103
|
|
|
69
|
-
|
|
70
|
-
const input = await rl.question('Message > ')
|
|
71
|
-
const response = await bot.sendMessage(input)
|
|
72
|
-
console.log('>', response)
|
|
73
|
-
} while (!bot.getIsDone())
|
|
104
|
+
```
|
|
74
105
|
|
|
75
|
-
|
|
76
|
-
rl.close()
|
|
77
|
-
})()
|
|
106
|
+
### Conversation Memory
|
|
78
107
|
|
|
108
|
+
Conversation Memory is the total number of messages that will be tracked during a conversation. Once the limit is hit, old messages will be popped off the stack and forgotten. Currently, you can only configure memory through your project's .env:
|
|
79
109
|
|
|
110
|
+
```env
|
|
111
|
+
OPENAI_MESSAGE_MEMORY_LIMIT=10
|
|
80
112
|
```
|
|
81
113
|
|
|
114
|
+
> *Note*: OpenAI is currently the only adapter supported. If you would like to see support for other adapters (or programmatic ways to configure memory), please open an issue and we'll get on it! 🤘
|
|
115
|
+
|
|
82
116
|
### Adding state to your conversation
|
|
83
117
|
This library depends on [`@sprucelabs/spruce-schema`](https://github.com/sprucelabsai/spruce-schema) to handle the structure and validation rules around your state.
|
|
84
118
|
```ts
|
|
@@ -126,24 +160,84 @@ await skill.on('did-update-state', () => {
|
|
|
126
160
|
|
|
127
161
|
The approach to integrating 3rd party api's (as well as dropping in other dynamic data into responses) is straight forward.
|
|
128
162
|
|
|
163
|
+
In this contrived example, you can see where you'd implement the callbacks for `availableTimes`, `favoriteColor`, and `book` to actually call the APIs and return the results.
|
|
164
|
+
|
|
129
165
|
```ts
|
|
130
166
|
const skill = bots.Skill({
|
|
131
167
|
yourJobIfYouChooseToAcceptItIs:
|
|
132
|
-
"to be
|
|
168
|
+
"to be the best appointment taker on the planet. You have a many years of experience. You are going to ask me only 2 questions for this practice run. First, you'll ask me to pick an available time. Then, you'll ask me to pick my favorite color (make sure to call the api to see what times and colors i can choose from). After all is said and done, make sure to actually book the appointment!:",
|
|
169
|
+
weAreDoneWhen: 'the appointment is booked!',
|
|
170
|
+
pleaseKeepInMindThat: [
|
|
171
|
+
'people don\'t always know what they want, so be patient and guide them through the process.',
|
|
172
|
+
'We have cancelled our coloring services, so if you\'re asked about them, tell the user we\'ve discontinued them.'
|
|
173
|
+
],
|
|
133
174
|
callbacks: {
|
|
134
175
|
availableTimes: {
|
|
135
176
|
cb: async () => {
|
|
136
|
-
return [
|
|
137
|
-
'
|
|
138
|
-
|
|
177
|
+
return [
|
|
178
|
+
'9am',
|
|
179
|
+
'10am',
|
|
180
|
+
'11am',
|
|
181
|
+
'1pm',
|
|
182
|
+
'4pm',
|
|
183
|
+
'5pm',
|
|
184
|
+
'12am.',
|
|
185
|
+
].join('\n')
|
|
139
186
|
},
|
|
140
|
-
useThisWhenever: 'your are showing what times
|
|
187
|
+
useThisWhenever: 'your are showing what times i can pick from.',
|
|
141
188
|
},
|
|
142
189
|
favoriteColor: {
|
|
143
190
|
cb: async () => {
|
|
144
191
|
return ['red', 'blue', 'green', 'purple'].join('\n')
|
|
145
192
|
},
|
|
146
|
-
useThisWhenever:
|
|
193
|
+
useThisWhenever:
|
|
194
|
+
'your are showing what colors i can pick from.',
|
|
195
|
+
},
|
|
196
|
+
book: {
|
|
197
|
+
cb: async (options) => {
|
|
198
|
+
console.log('BOOKING OPTIONS', options)
|
|
199
|
+
return 'Appointment booked!'
|
|
200
|
+
},
|
|
201
|
+
useThisWhenever: 'You are ready to book an appointment!',
|
|
202
|
+
parameters: [
|
|
203
|
+
{
|
|
204
|
+
name: 'time',
|
|
205
|
+
isRequired: true,
|
|
206
|
+
type: 'string',
|
|
207
|
+
},
|
|
208
|
+
{
|
|
209
|
+
name: 'color',
|
|
210
|
+
isRequired: true,
|
|
211
|
+
type: 'string',
|
|
212
|
+
},
|
|
213
|
+
],
|
|
147
214
|
},
|
|
148
215
|
},
|
|
149
|
-
})
|
|
216
|
+
})
|
|
217
|
+
|
|
218
|
+
```
|
|
219
|
+
|
|
220
|
+
### Choosing a model
|
|
221
|
+
|
|
222
|
+
When you configure a `Skill` with your bot, you can specify the model that the skill will use. In other words, you can have different skills use different models depending on their requirements.
|
|
223
|
+
|
|
224
|
+
```ts
|
|
225
|
+
|
|
226
|
+
const bookingSkill = bots.Skill({
|
|
227
|
+
model: 'gpt-5',
|
|
228
|
+
yourJobIfYouChooseToAcceptItIs: 'to tell knock knock jokes!',
|
|
229
|
+
pleaseKeepInMindThat: [
|
|
230
|
+
'our audience is younger, so keep it PG!',
|
|
231
|
+
'you should never laugh when someone does not get the joke.',
|
|
232
|
+
"after each joke, you should tell me how many jokes you have left to tell before we're done.",
|
|
233
|
+
'you should acknowledge if someone laughs at your joke by saying "Thanks!" or "Glad you thought that was funny"!',
|
|
234
|
+
],
|
|
235
|
+
weAreDoneWhen: 'you have told 3 jokes!',
|
|
236
|
+
})
|
|
237
|
+
|
|
238
|
+
const bookingBot = bots.Bot({
|
|
239
|
+
skill: bookingSkill,
|
|
240
|
+
youAre: "a bot named Sprucebot that is in test mode. At the start of every conversation, you introduce yourself and announce that you are in test mode so I don't get confused! You are both hip and adorable. You say things like, 'Jeepers' and 'Golly' or even 'Jeezey peezy'!",
|
|
241
|
+
})
|
|
242
|
+
|
|
243
|
+
```
|
package/build/chat.js
CHANGED
|
@@ -50,8 +50,12 @@ dotenv_1.default.config({ quiet: true });
|
|
|
50
50
|
const rl = readline.createInterface({ input: node_process_1.stdin, output: node_process_1.stdout });
|
|
51
51
|
void (async () => {
|
|
52
52
|
console.clear();
|
|
53
|
+
// Create the adapter that handles actually sending the prompt to an LLM
|
|
53
54
|
const adapter = OpenAi_1.default.Adapter(process.env.OPEN_AI_API_KEY);
|
|
55
|
+
// The LlmFactory is a layer of abstraction that simplifies bot creation
|
|
56
|
+
// and enables test doubling (mocks, spies, etc)
|
|
54
57
|
const bots = SprucebotLlmFactory_1.default.Factory(adapter);
|
|
58
|
+
// Different examples of things you may want to play with (see line 34)
|
|
55
59
|
const skills = {
|
|
56
60
|
jokes: (0, buildJokeSkill_1.default)(bots),
|
|
57
61
|
profile: (0, buildProfileSkill_1.default)(bots),
|
|
@@ -59,12 +63,18 @@ void (async () => {
|
|
|
59
63
|
fileTransformer: (0, buildFileTransformerSkill_1.default)(bots),
|
|
60
64
|
receptionist: (0, buildReceptionistSkill_1.default)(bots),
|
|
61
65
|
};
|
|
66
|
+
// Construct a Bot instance and pass the skill of your choice
|
|
62
67
|
const bot = bots.Bot({
|
|
63
|
-
skill: skills.callbacks,
|
|
68
|
+
skill: skills.callbacks, //<-- try jokes, profile, etc.
|
|
64
69
|
youAre: "a bot named Sprucebot that is in test mode. At the start of every conversation, you introduce yourself and announce that you are in test mode so I don't get confused! You are both hip and adorable. You say things like, 'Jeepers' and 'Golly' or even 'Jeezey peezy'!",
|
|
65
70
|
});
|
|
66
71
|
do {
|
|
72
|
+
// Read from stdin
|
|
67
73
|
const input = await rl.question('Message > ');
|
|
74
|
+
// Send the message to the bot and log the response
|
|
75
|
+
// We use a callback because one message may trigger a conversation
|
|
76
|
+
// which will include many messages and bot.sendMessage(...) only
|
|
77
|
+
// returns the last message sent back from the LLM
|
|
68
78
|
await bot.sendMessage(input, (message) => console.log('>', message));
|
|
69
79
|
} while (!bot.getIsDone());
|
|
70
80
|
console.log('Signing off...');
|
package/build/esm/chat.js
CHANGED
|
@@ -21,8 +21,12 @@ dotenv.config({ quiet: true });
|
|
|
21
21
|
const rl = readline.createInterface({ input, output });
|
|
22
22
|
void (() => __awaiter(void 0, void 0, void 0, function* () {
|
|
23
23
|
console.clear();
|
|
24
|
+
// Create the adapter that handles actually sending the prompt to an LLM
|
|
24
25
|
const adapter = OpenAiAdapter.Adapter(process.env.OPEN_AI_API_KEY);
|
|
26
|
+
// The LlmFactory is a layer of abstraction that simplifies bot creation
|
|
27
|
+
// and enables test doubling (mocks, spies, etc)
|
|
25
28
|
const bots = SprucebotLlmFactory.Factory(adapter);
|
|
29
|
+
// Different examples of things you may want to play with (see line 34)
|
|
26
30
|
const skills = {
|
|
27
31
|
jokes: buildJokeSkill(bots),
|
|
28
32
|
profile: buildProfileSkill(bots),
|
|
@@ -30,12 +34,18 @@ void (() => __awaiter(void 0, void 0, void 0, function* () {
|
|
|
30
34
|
fileTransformer: buildFileTransformerSkill(bots),
|
|
31
35
|
receptionist: buildReceptionistSkill(bots),
|
|
32
36
|
};
|
|
37
|
+
// Construct a Bot instance and pass the skill of your choice
|
|
33
38
|
const bot = bots.Bot({
|
|
34
|
-
skill: skills.callbacks,
|
|
39
|
+
skill: skills.callbacks, //<-- try jokes, profile, etc.
|
|
35
40
|
youAre: "a bot named Sprucebot that is in test mode. At the start of every conversation, you introduce yourself and announce that you are in test mode so I don't get confused! You are both hip and adorable. You say things like, 'Jeepers' and 'Golly' or even 'Jeezey peezy'!",
|
|
36
41
|
});
|
|
37
42
|
do {
|
|
43
|
+
// Read from stdin
|
|
38
44
|
const input = yield rl.question('Message > ');
|
|
45
|
+
// Send the message to the bot and log the response
|
|
46
|
+
// We use a callback because one message may trigger a conversation
|
|
47
|
+
// which will include many messages and bot.sendMessage(...) only
|
|
48
|
+
// returns the last message sent back from the LLM
|
|
39
49
|
yield bot.sendMessage(input, (message) => console.log('>', message));
|
|
40
50
|
} while (!bot.getIsDone());
|
|
41
51
|
console.log('Signing off...');
|