openkbs 0.0.19 → 0.0.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/Docs.md CHANGED
@@ -9,9 +9,11 @@ src/
  │ ├── onRequest.js // Handles incoming user messages
  │ ├── onResponse.js // Handles outgoing LLM messages
  │ ├── onPublicAPIRequest.js // Handles public API requests
+ │ ├── onAddMessages.js // Handles messages added to the chat (NEW)
  │ ├── onRequest.json // Dependencies for onRequest handler
  │ ├── onResponse.json // Dependencies for onResponse handler
- └── onPublicAPIRequest.json // Dependencies for onPublicAPIRequest handler
+ ├── onPublicAPIRequest.json // Dependencies for onPublicAPIRequest handler
+ │ └── onAddMessages.json // Dependencies for onAddMessages handler (NEW)
  │── Frontend/
  │ ├── contentRender.js // Custom rendering logic for chat messages
  │ └── contentRender.json // Dependencies for the contentRender module
@@ -220,7 +222,6 @@ module.exports = {
  if (!kbId) return { error: "kbId is not provided" }

  try {
- // Encrypt specific fields if needed
  const myItem = {};
  for (const attribute of attributes) {
  const { attrName, encrypted } = attribute;
@@ -248,52 +249,98 @@ module.exports = {

  ```javascript
  // Example creating a "feedback" item
- const kbId = 'your-kb-id'; // Replace with your actual kbId
- const data = {
- action: "createItem",
- kbId: kbId, // knowledge base ID
- itemType: "feedback",
- attributes: [
- { attrType: "keyword1", attrName: "name", encrypted: true },
- { attrType: "text1", attrName: "feedbackText", encrypted: false }
- ],
- item: {
- name: "John Doe",
- feedbackText: "Great product!"
- }
- };
+ const createFeedback = async (kbId, name, text) => (
+ await fetch('https://chat.openkbs.com/publicAPIRequest', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ action: "createItem",
+ kbId,
+ itemType: "feedback",
+ attributes: [
+ { attrType: "keyword1", attrName: "name", encrypted: true },
+ { attrType: "text1", attrName: "feedbackText", encrypted: false }
+ ],
+ item: { name, feedbackText: text }
+ })
+ })
+ ).json();
+ ```

- fetch('https://chat.openkbs.com/publicAPIRequest', { // Call the public API endpoint
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify(data)
- })
- .then(response => response.json())
- .then(data => {
- if (data.error) {
- console.error("Error creating item:", data.error);
- // Handle error (e.g., display error message)
- } else {
- console.log("Item created successfully:", data);
- // Handle success (e.g., update UI)
- }
- })
- .catch(error => {
- console.error("Network error:", error);
- // Handle network errors
- });
+ By utilizing `onPublicAPIRequest` and `openkbs.items`, you can build powerful integrations that allow external systems to store and manage data within your OpenKBS application without compromising security. This approach is especially valuable for scenarios like form submissions, webhooks, or any situation where direct, unauthenticated access to data storage is required. Remember to carefully consider security implications and implement necessary precautions.
+
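For orientation, a minimal usage sketch of the new `createFeedback` helper; the kbId and field values below are placeholders, and the `error` property mirrors the handler's error responses shown above:

```javascript
// Hypothetical call site: substitute your real knowledge base ID
const result = await createFeedback('my-kb-id', 'John Doe', 'Great product!');
if (result.error) console.error('Error creating item:', result.error);
```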
+ ### `onAddMessages` Event Handler:

+ The `onAddMessages` handler allows you to intercept and process messages *as they are added to the chat*. This handler is triggered *after* the `onRequest` handler but *before* the message is sent to the LLM. It's particularly useful for scenarios where a third-party system or service sends messages directly to your OpenKBS application to perform an action.

+ **Example: User moderation:**
+
+ **1. Third-Party Service API request:**
+
+ ```javascript
+ // Example of a third-party system sending a chat message to OpenKBS
+ axios.post('https://chat.openkbs.com/', {
+ action: "chatAddMessages",
+ chatId: 'NSFW_CHAT_ID', // the chat id created to log and process NSFW message
+ messages: [{
+ role: "system",
+ content: JSON.stringify({
+ labels: ['adult', 'explicit'],
+ fileName: 'image.jpg',
+ path: '/uploads/image.jpg'
+ }),
+ msgId: `${Date.now()}-000000`
+ }],
+ apiKey: "YOUR_API_KEY",
+ kbId: "YOUR_KB_ID"
+ }, {
+ headers: { 'Content-Type': 'application/json' }
+ });
  ```

- By utilizing `onPublicAPIRequest` and `openkbs.items`, you can build powerful integrations that allow external systems to store and manage data within your OpenKBS application without compromising security. This approach is especially valuable for scenarios like form submissions, webhooks, or any situation where direct, unauthenticated access to data storage is required. Remember to carefully consider security implications and implement necessary precautions.
+ **2. `onAddMessages` Handler:**
+
+ ```javascript
+ // src/Events/onAddMessages.js
+ import * as actions from './actions.js';
+
+ export const handler = async (event) => {
+ const { messages, chatId } = event.payload;
+ let msgData;
+
+ // NSFW Chat Handler
+ if (chatId === 'NSFW_CHAT_ID') { // Check if the message is for the NSFW chat
+ try {
+ msgData = JSON.parse(messages[0].content); // Parse the message content (expecting JSON)
+ const { data } = await actions.getUser([null, msgData.kbId]); // Get user information
+ await actions.warnAccount([null, data.user.accountId, msgData?.labels]); // Issue a warning
+ await actions.deleteFile([null, msgData.path]); // Delete the offending file
+
+ // Return a system message confirming the action
+ return [
+ ...messages,
+ {
+ role: 'system',
+ msgId: Date.now() + '000000',
+ content: `### 👮‍♀️ System Actions:\nWarning issued and content removed`
+ }
+ ];
+ } catch (e) {
+ console.error("Error processing NSFW content:", e);
+ }
+ }
+
+ return messages; // Return messages unchanged if no action is taken
+ };
+
+ ```

- **Dependencies (onRequest.json, onResponse.json, onPublicAPIRequest.json):**
+ **Dependencies (onRequest.json, onResponse.json, etc.):**

  These files specify the NPM package dependencies required for the respective event handlers. They follow the standard `package.json` format.

  ```json
- // src/Events/onRequest.json, src/Events/onResponse.json, src/Events/onPublicAPIRequest.json
+ // src/Events/*.json
  {
  "dependencies": {
  "your-package": "^1.0.0"
@@ -550,7 +597,7 @@ The dependencies marked as `(fixed)` are not installed as additional dependencie

  These components and utilities are accessible directly within your `onRenderChatMessage` function, streamlining your custom development process.

- ### `msgIndex`
+ ### msgIndex
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { msgIndex, messages } = params;
@@ -560,7 +607,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `messages`
+ ### messages
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { messages } = params;
@@ -571,7 +618,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `setMessages`
+ ### setMessages
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { setMessages, messages } = params;
@@ -580,7 +627,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `KB`
+ ### KB
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { KB } = params;
@@ -589,7 +636,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `chatContainerRef`
+ ### chatContainerRef
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { chatContainerRef } = params;
@@ -599,7 +646,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `RequestChatAPI`
+ ### RequestChatAPI
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { RequestChatAPI, messages } = params;
@@ -608,7 +655,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `setSystemAlert`
+ ### setSystemAlert
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { setSystemAlert } = params;
@@ -616,7 +663,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `setBlockingLoading`
+ ### setBlockingLoading
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { setBlockingLoading } = params;
@@ -626,7 +673,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `blockingLoading`
+ ### blockingLoading
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { blockingLoading } = params;
@@ -636,7 +683,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `sendButtonRef`
+ ### sendButtonRef
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { sendButtonRef } = params;
@@ -646,7 +693,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `sendButtonRippleRef`
+ ### sendButtonRippleRef
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { sendButtonRippleRef } = params;
@@ -656,7 +703,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `setInputValue`
+ ### setInputValue
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { setInputValue } = params;
@@ -664,7 +711,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `renderSettings`
+ ### renderSettings
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { renderSettings } = params;
@@ -672,7 +719,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `axios`
+ ### axios
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { axios } = params;
@@ -681,7 +728,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `itemsAPI`
+ ### itemsAPI
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { itemsAPI } = params;
@@ -690,7 +737,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `indexedDB`
+ ### indexedDB
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { indexedDB } = params;
@@ -699,7 +746,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `generateMsgId`
+ ### generateMsgId
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { generateMsgId } = params;
@@ -708,7 +755,7 @@ const onRenderChatMessage = async (params) => {
  };
  ```

- ### `kbUserData`
+ ### kbUserData
  ```javascript
  const onRenderChatMessage = async (params) => {
  const { kbUserData } = params;
package/ON_PREMISES.md ADDED
@@ -0,0 +1,215 @@
+
+ ### Running the Backend Locally (On-Premises)
+
+ To run the backend services of your AI application locally, follow these steps. This allows you to manage chat services, code execution, and AI LLM services on your own infrastructure.
+
+ #### Running the Chat Service Locally
+
+ 1. **Start the Chat Service**:
+ - Open a new terminal and navigate to the root folder of your application.
+ - Run the following command:
+
+ ```bash
+ npm run chat
+ ```
+
+ - If LocalStack is not installed, you will receive instructions on how to install it based on your platform.
+ - Open another terminal, navigate to `/tmp`, install LocalStack using the suggested commands, and then rerun `npm run chat`.
+
+ 2. **Configure OpenAI Key**:
+ - Enter your `OPENAI_KEY` when prompted. This key will be stored at `~/.openkbs/.env`.
+
+ 3. **Access the Local Chat Service**:
+ - Refresh your browser at `http://{kbId}.apps.localhost:38593/chat`.
+ - You will see "On-Premises" in green text, indicating that your OpenKBS instance is using the local chat server to communicate with the OpenAI streaming API.
+ - You can remove the cloud model options by setting `"enableCloudModels": false` in `config.json`, as shown in the sketch below.
+
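A minimal sketch of the relevant `config.json` entry (assuming the key sits at the top level of the file; all other settings omitted):

```json
{
  "enableCloudModels": false
}
```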
+ #### Running the Code Execution Service Locally
+
+ 1. **Start the Code Execution Service**:
+ - Open another terminal tab, navigate to the root folder of your KB app, and run:
+
+ ```bash
+ npm run code
+ ```
+
+ 2. **Enter Secrets**:
+ - You may be prompted to enter any secret placeholders in your `./src/Events/actions.js`. By default, this includes `googlesearch_api_key` and `googlesearch_engine_id`.
+ - You can press enter to skip, but for using Google Search as an AI tool, it's recommended to fill them. Google provides 100 free searches per day.
+
+ Congratulations! The LLM can now execute NodeJS code directly on your machine!
+
+ #### Enhancing Your Application with Code Execution
+
+ To utilize the code execution feature, follow these steps:
+
+ 1. **Update `contentRender.js`**:
+ - Modify your local `contentRender.js` file to match the version found at [contentRender.js](./examples/cloud-master/contentRender.js). This update will provide the necessary UI components for local code execution and response rendering.
+
+ 2. **Update `instructions.txt`**:
+ - Edit your local `instructions.txt` file to include the instructions found at [instructions.txt](./examples/cloud-master/instructions.txt). These instructions will guide the LLM on how to output code and other API commands for execution by the OpenKBS framework.
+
+ 3. **Push the new instructions**:
+ - Push the instructions, which are stored encrypted in the OpenKBS registry:
+ ```bash
+ openkbs push origin app/instructions.txt
+ ```
+ - Push to LocalStack to build and deploy all Node.js event handlers under `./src/Events`:
+ ```bash
+ openkbs push localstack
+ ```
+ 4. **Requesting the AI to Perform Tasks on Your PC and AWS Cloud**:
+ - Instruct the AI to list your desktop files, review the code, click `execute`, and click `send`:
+ ```
+ List my desktop files
+ ```
+ - Instruct the AI to create an S3 bucket and back up your desktop images to it:
+ ```
+ Create an S3 bucket and back up my desktop images to it
+ ```
+ ![backup.png](examples%2Fcloud-master%2Fbackup.png)
+ ---
+
+ ## Installing openkbs-ai-server and Integrating Llama 3.1 and Stable Diffusion 3 Locally
+
+ ![llama-loaded.png](examples%2Fcloud-master%2Fllama-loaded.png)
+
+ To set up the `openkbs-ai-server` and integrate advanced AI models like Llama 3.1 and Stable Diffusion 3 on your local machine, follow the steps outlined below.
+
+ ### Prerequisites
+
+ Ensure you have the following prerequisites installed and configured:
+
+ - Ubuntu 22.04 or a compatible Linux distribution.
+ - Python 3.10 and virtual environment tools.
+ - Node.js and npm.
+ - NVIDIA or AMD GPU drivers, depending on your hardware.
+
+ Please follow the installation guide on [GitHub](https://github.com/open-kbs/openkbs-ai-server).
+
+ ### Step 1: Checkout, Build, and Run
+
+ Clone the `openkbs-ai-server` repository and set up the environment:
+
+ ```bash
+ git clone git@github.com:open-kbs/openkbs-ai-server.git
+ cd openkbs-ai-server/cluster
+ npm i
+ cd ..
+ python -m venv .env
+ source .env/bin/activate
+ ```
+
+ **IMPORTANT: SELECT THE CORRECT GPU INSTRUCTIONS BELOW. DO NOT EXECUTE BOTH.**
+
+ #### **FOR AMD GPUS:**
+
+ **ONLY FOLLOW THESE INSTRUCTIONS IF YOU HAVE AN AMD GPU.**
+
+ Install the necessary libraries and Python packages:
+
+ ```bash
+ sudo apt-get install -y libjpeg-dev libpng-dev
+ pip install wheel setuptools
+ pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.1/
+ pip install -r ./models/requirements_AMD.txt
+ ```
+
+ #### **FOR NVIDIA GPUS:**
+
+ **ONLY FOLLOW THESE INSTRUCTIONS IF YOU HAVE AN NVIDIA GPU.**
+
+ Install the required Python packages:
+
+ ```bash
+ pip install torch
+ pip install -r ./models/requirements_NVIDIA.txt
+ ```
+
+ ### Step 2: Configure Hugging Face Authentication
+
+ Log in to Hugging Face to access the AI models:
+
+ ```bash
+ huggingface-cli login
+ ```
+
+ Enter your Hugging Face token when prompted.
+
+ ### Step 3: Install Global Node.js Packages
+
+ Install the global Node.js packages required for running the server:
+
+ ```bash
+ npm install -g pm2 nodemon react-scripts
+ ```
+
+ ### Step 4: Start the AI Server
+
+ Launch the AI server using the provided script:
+
+ ```bash
+ ./start.sh
+ ```
+
+ This command will start both the frontend and backend services using pm2. Your default web browser should automatically open to [http://localhost:7080/register](http://localhost:7080/register), where you can register the admin account for the AI server.
+
+ ### Step 5: Install AI Models
+
+ In the AI server admin panel, search for and install the following models:
+
+ - **Llama-3.1-8B-Instruct**: Ensure you have access to [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on Hugging Face.
+ - **Stable Diffusion 3**: Ensure you have access to [Stable Diffusion 3](https://huggingface.co/stabilityai/stable-diffusion-3-medium) on Hugging Face.
+
+ After installation, restart your chat server to apply the changes.
+
+ ### Step 6: Integrate Stable Diffusion as an Events action, so that Llama can call it
+
+ Go to your app root folder:
+ ```bash
+ cd my-pc-agent
+ ```
+
+ Add the following to `./src/Events/actions.js`:
+ ```javascript
+ [/\/?textToImage\("(.*)"\)/, async (match) => {
+ try {
+ const response = await axios.get(`http://localhost:8080/pipe/stabilityai--stable-diffusion-3-medium-diffusers--default?prompt=${encodeURIComponent(match[1])}`, {
+ responseType: 'arraybuffer'
+ });
+
+ const base64Data = Buffer.from(response.data, 'binary').toString('base64');
+ const contentType = response.headers['content-type'];
+ const imageSrc = `data:${contentType};base64,${base64Data}`;
+
+ return { type: 'SAVED_CHAT_IMAGE', imageSrc, ...meta };
+ } catch (error) {
+ console.error('Error fetching image:', error);
+ throw error; // or handle the error as needed
+ }
+ }]
+ ```
+
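Once pushed, this action fires whenever the model's output matches the `textToImage("...")` pattern in the regex above; for example, the LLM (guided by your `instructions.txt`) might emit a command such as the following illustrative one:

```
/textToImage("a watercolor fox in an autumn forest")
```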
+ Push the changes:
+ ```bash
+ openkbs push localstack
+ ```
+ ### Step 7: Test Llama agent
+
+ Once the models are installed and the server is running, select `Llama-3.1-8B-Inst` from the Chat Models selection and type in the chat:
+
+ ```
+ Hey Llama, search Google for the latest AI news and wait, then generate a news image. Finally, use a template function to create an HTML page hosted on the S3 bucket 'ai-news-openkbs'.
+
+ ```
+ ![ai1.png](examples%2Fcloud-master%2Fai1.png)
+
+ ![llama-loaded.png](examples%2Fcloud-master%2Fllama-loaded.png)
+
+ ![sd3-loaded.png](examples%2Fcloud-master%2Fsd3-loaded.png)
+
+ ![ai2.png](examples%2Fcloud-master%2Fai2.png)
+
+ ![ai3.png](examples%2Fcloud-master%2Fai3.png)
+ Have fun!
package/README.md CHANGED
@@ -1,51 +1,25 @@
- # OpenKBS · [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://github.com/open-kbs/openkbs-chat/blob/main/LICENSE) [![npm version](https://img.shields.io/badge/npm-v0.0.10-orange.svg)](https://www.npmjs.com/package/openkbs)
-
- OpenKBS is an open-source platform for building and deploying AI agents and applications. Our mission is to provide developers with a flexible and powerful framework that empowers them to create advanced AI agents with ease, using simple text prompts to specify requirements.
+ # OpenKBS · [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://github.com/open-kbs/openkbs-chat/blob/main/LICENSE) [![npm version](https://img.shields.io/badge/npm-v0.0.20-orange.svg)](https://www.npmjs.com/package/openkbs)

+ OpenKBS is an extendable open-source platform designed to build, deploy, and integrate AI agents anywhere, from websites to IoT devices. Its event-driven architecture enables full customization of backend and frontend components, while the LLM abstraction layer allows seamless switching between language models.
  ### Last Updates
- - openkbs-ai-server: added support for stable-diffusion-3.5-large

  ### Table of Contents

-
- - [Creating Your First AI Agent Manually](#creating-your-first-ai-agent-manually)
- - [Step 1: Install CLI](#step-1-install-cli)
- - [Step 2: Create new Application](#step-2-create-new-application)
- - [Step 3: Understand the Project Structure](#step-3-understand-the-project-structure)
- - [Step 4: Deploy Your Application](#step-4-deploy-your-application)
- - [Step 5: Enhance Your Application](#step-5-enhance-your-application)
- - [Step 6: Local Development](#step-6-local-development)
- - [Step 7: Use Built-in MUI Components](#step-7-use-built-in-mui-components)
- - [Step 8: Running the Backend Locally (On-Premises)](#step-8-running-the-backend-locally-on-premises)
- - [Installing openkbs-ai-server and Integrating Llama 3.1 and Stable Diffusion 3 Locally](#installing-openkbs-ai-server-and-integrating-llama-31-and-stable-diffusion-3-locally)
+ - [Install CLI](#install-cli)
+ - [Create new Application](#create-new-application)
+ - [Deploy Your Application](#deploy-your-application)
+ - [Enhance Your Application](#enhance-your-application)
+ - [Local Development](#local-development)
+ - [Use Built-in MUI Components](#use-built-in-mui-components)
  - [License](#license)
  - [Contributing](#contributing)
  - [Contact](#contact)

- ## Installation
-
- This module needs to be installed globally, so use the `-g` flag when installing:
-
- ```bash
- npm install -g openkbs
- ```
- ![ai1.png](examples%2Fcloud-master%2Fai1.png)
-
- ## Key Features
-
- [//]: # (- **Generative AI First**: An intuitive development interface designed for human beings. Employs generative AI tools to streamline the development life cycle, enabling rapid requirements gathering, system design, and deployment.)
- - **Seamless LLM Integration**: LLM abstraction layer providing a unified interface for various LLM vendors, such as OpenAI, Anthropic, and open-source models like LLaMA and Mistral. This layer allows one-click switching between LLMs without modifying source code, enabling seamless testing across models.
- - **Extensive Tooling**: Utilize a broad range of AI tools and services to build robust, scalable AI agents. This includes code execution, database engines, web browsing, image generation, embedding models, speech synthesis, and recognition. These tools enable LLMs to operate autonomously, with more resources continually being added.
- - **Open Source**: Provides developers with the freedom to customize, modify, and distribute the software freely.
-
- ---
-
-
  ## Creating Your First AI Agent Manually

  Follow these steps to create and deploy your first OpenKBS app using React and Node.js.

- ### Step 1: Install CLI
+ ### Install CLI

  First, ensure you have the OpenKBS CLI installed globally:

@@ -77,7 +51,7 @@ npm install -g openkbs



- ### Step 2: Create New Application
+ ### Create New Application

  Create a new application using the OpenKBS CLI:

@@ -91,21 +65,7 @@ Navigate into your newly created application directory:
  cd my-pc-agent
  ```

- ### Step 3: Understand the Project Structure
-
- Your application will have the following structure:
-
- - `./app/icon.png`: Application icon.
- - `./app/settings.json`: Application settings.
- - `./app/instructions.txt`: Agent instructions.
- - `./src/Events/actions.js`: Contains all backend actions (LLM commands).
- - `./src/Events/onRequest.js`: Event handler that executes a command on user input.
- - `./src/Events/onRequest.json`: Contains all npm package dependencies for onRequest module.
- - `./src/Events/onResponse.js`: Similar to `onRequest.js`, but executed against LLM output.
- - `./src/Events/onResponse.json`: Contains all npm package dependencies for onResponse module.
- - `./src/Frontend/contentRender.js`: Contains frontend components of your application.
- - `./src/Frontend/contentRender.json`: Contains all npm package dependencies for contentRender module.
- ### Step 4: Deploy Your Application
+ ### Deploy Your Application

  You have two options for deployment: OpenKBS Cloud or LocalStack.

@@ -127,7 +87,7 @@

  3. Open the provided URL and interact with your application.

- ### Step 5: Enhance Your Application
+ ### Enhance Your Application

  To improve your application's rendering, you can use libraries like `react-markdown` for example.

@@ -162,7 +122,7 @@ To improve your application's rendering, you can use libraries like `react-markd
  openkbs push
  ```

- ### Step 6: Local Development
+ ### Local Development

  For faster frontend development, run the OpenKBS UI dev server locally:

@@ -173,7 +133,7 @@ For faster frontend development, run the OpenKBS UI dev server locally:

  This command opens a browser pointing to `localhost`, allowing automatic rebuilds of your frontend code locally.

- ### Step 7: Use Built-in MUI Components
+ ### Use Built-in MUI Components

  Enhance your UI with Material-UI components:

@@ -212,225 +172,6 @@ Enhance your UI with Material-UI components:
  openkbs push
  ```

- ---
-
- ### Step 8: Running the Backend Locally (On-Premises)
-
- To run the backend services of your AI application locally, follow these steps. This allows you to manage chat services, code execution, and AI LLM services on your own infrastructure.
-
- #### Running the Chat Service Locally
-
- 1. **Start the Chat Service**:
- - Open a new terminal and navigate to the root folder of your application.
- - Run the following command:
-
- ```bash
- npm run chat
- ```
-
- - If LocalStack is not installed, you will receive instructions on how to install it based on your platform.
- - Open another terminal, navigate to `/tmp`, and install LocalStack using the suggested commands and retrun `npm run chat`
-
-
- 4. **Configure OpenAI Key**:
- - Enter your `OPENAI_KEY` when prompted. This key will be stored at `~/.openkbs/.env`.
-
- 5. **Access the Local Chat Service**:
- - Refresh your browser at `http://{kbId}.apps.localhost:38593/chat`.
- - You will see "On-Premises" in green text, indicating that your OpenKBS instance is using the local chat server to communicate with the OpenAI streaming API.
- - You can remove the cloud models options by setting `"enableCloudModels": false` in `config.json`
-
- #### Running the Code Execution Service Locally
-
- 1. **Start the Code Execution Service**:
- - Open another terminal tab, navigate to the root folder of your KB app, and run:
-
- ```bash
- npm run code
- ```
-
- 2. **Enter Secrets**:
- - You may be prompted to enter any secret placeholders in your `./src/Events/actions.js`. By default, this includes `googlesearch_api_key` and `googlesearch_engine_id`.
- - You can press enter to skip, but for using Google Search as an AI tool, it's recommended to fill them. Google provides 100 free searches per day.
-
- Congratulations! The LLM can now execute NodeJS code directly on your machine!
-
- #### Enhancing Your Application with Code Execution
-
- To utilize the code execution feature, follow these steps:
-
- 1. **Update `contentRender.js`**:
- - Modify your local `contentRender.js` file to match the version found at [contentRender.js](./examples/cloud-master/contentRender.js). This update will provide the necessary UI components for local code execution and response rendering.
-
- 2. **Update `instructions.txt`**:
- - Edit your local `instructions.txt` file to include the instructions found at [instructions.txt](./examples/cloud-master/instructions.txt). These instructions will guide the LLM on how to output code and other API commands for execution by the OpenKBS framework.
-
- 3. **Push the new instructions**:
- - we have to push the instructions which are stored encrypted at OpenKBS registry:
- ```bash
- openkbs push origin app/instructions.txt
- ```
- - push to localstack to build and deploy all Node.js events - ./src/Events
- ```bash
- openkbs push localstack
- ```
- 4. **Requesting the AI to Perform Tasks on Your PC and AWS Cloud**:
- - Instruct the AI to list your desktop files, review the code, click `execute`, and click `send`:
- ```
- List my desktop files
- ```
- - Instruct the AI to create an S3 bucket and backup your desktop images to it:
- ```
- Create an S3 bucket and back up my desktop images to it
- ```
- ![backup.png](examples%2Fcloud-master%2Fbackup.png)
- ---
-
- ## Installing openkbs-ai-server and Integrating Llama 3.1 and Stable Diffusion 3 Locally
-
- ![llama-loaded.png](examples%2Fcloud-master%2Fllama-loaded.png)
-
- To set up the `openkbs-ai-server` and integrate advanced AI models like Llama 3.1 and Stable Diffusion 3 on your local machine, follow the steps outlined below.
-
- ### Prerequisites
-
- Ensure you have the following prerequisites installed and configured:
-
- - Ubuntu 22.04 or a compatible Linux distribution.
- - Python 3.10 and virtual environment tools.
- - Node.js and npm.
- - NVIDIA or AMD GPU drivers, depending on your hardware.
-
- Please follow the installation on [GitHub](https://github.com/open-kbs/openkbs-ai-server).
-
- ### Step 1: Checkout, Build, and Run
-
- Clone the `openkbs-ai-server` repository and set up the environment:
-
- ```bash
- git clone git@github.com:open-kbs/openkbs-ai-server.git
- cd openkbs-ai-server/cluster
- npm i
- cd ..
- python -m venv .env
- source .env/bin/activate
- ```
-
- **IMPORTANT: SELECT THE CORRECT GPU INSTRUCTIONS BELOW. DO NOT EXECUTE BOTH.**
-
- #### **FOR AMD GPUS:**
-
- **ONLY FOLLOW THESE INSTRUCTIONS IF YOU HAVE AN AMD GPU.**
-
- Install necessary libraries and Python packages:
-
- ```bash
- sudo apt-get install -y libjpeg-dev libpng-dev
- pip install wheel setuptools
- pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.1/
- pip install -r ./models/requirements_AMD.txt
- ```
-
- #### **FOR NVIDIA GPUS:**
-
- **ONLY FOLLOW THESE INSTRUCTIONS IF YOU HAVE AN NVIDIA GPU.**
-
- Install the required Python packages:
-
- ```bash
- pip install torch
- pip install -r ./models/requirements_NVIDIA.txt
- ```
-
- ### Step 2: Configure Hugging Face Authentication
-
- Log in to Hugging Face to access the AI models:
-
- ```bash
- huggingface-cli login
- ```
-
- Enter your Hugging Face token when prompted.
-
- ### Step 3: Install Global Node.js Packages
-
- Install global Node.js packages required for running the server:
-
- ```bash
- npm install -g pm2 nodemon react-scripts
- ```
-
- ### Step 4: Start the AI Server
-
- Launch the AI server using the provided script:
-
- ```bash
- ./start.sh
- ```
-
- This command will start both the frontend and backend services using pm2. Your default web browser should automatically open to [http://localhost:7080/register](http://localhost:7080/register), where you can register the admin account for the AI server.
-
- ### Step 5: Install AI Models
-
- In the AI server admin panel, search for and install the following models:
-
- - **Llama-3.1-8B-Instruct**: Ensure you have access to [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on Hugging Face.
- - **Stable Diffusion 3**: Ensure you have access to [Stable Diffusion 3](https://huggingface.co/stabilityai/stable-diffusion-3-medium) on Hugging Face.
-
- After installation, restart your chat server to apply the changes.
-
- ### Step 6: Integrate Stable Diffusion under Events actions, so that Llama can call it
-
- Go to your app root folder
- ```bash
- cd my-pc-agent
- ```
-
- Add to `./src/Events/actions.js`
- ```javascript
- [/\/?textToImage\("(.*)"\)/, async (match) => {
- try {
- const response = await axios.get(`http://localhost:8080/pipe/stabilityai--stable-diffusion-3-medium-diffusers--default?prompt=${encodeURIComponent(match[1])}`, {
- responseType: 'arraybuffer'
- });
-
- const base64Data = Buffer.from(response.data, 'binary').toString('base64');
- const contentType = response.headers['content-type'];
- const imageSrc = `data:${contentType};base64,${base64Data}`;
-
- return { type: 'SAVED_CHAT_IMAGE', imageSrc, ...meta };
- } catch (error) {
- console.error('Error fetching image:', error);
- throw error; // or handle the error as needed
- }
- }]
- ```
-
- Push the changes:
- ```bash
- openkbs push localstack
- ```
- ### Step 7: Test Llama agent
-
- Once the models are installed and the server is running, select `Llama-3.1-8B-Inst` in your Chat Models selection and type in the chat:
-
- ```
- Hey Llama, search Google for the latest AI news and wait, then generate news image. Finally, use a template function to create an HTML page hosted on the S3 bucket 'ai-news-openkbs'.
-
- ```
-
- ![ai1.png](examples%2Fcloud-master%2Fai1.png)
-
- ![llama-loaded.png](examples%2Fcloud-master%2Fllama-loaded.png)
-
- ![sd3-loaded.png](examples%2Fcloud-master%2Fsd3-loaded.png)
-
- ![ai2.png](examples%2Fcloud-master%2Fai2.png)
-
- ![ai3.png](examples%2Fcloud-master%2Fai3.png)
- Have fun!
-
- ---
-
  ## License

  This project is licensed under the MIT License. For more details, please refer to the [LICENSE](https://github.com/open-kbs/openkbs-chat/blob/main/LICENSE) file.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "openkbs",
- "version": "0.0.19",
+ "version": "0.0.20",
  "description": "OpenKBS - Command Line Interface",
  "main": "src/index.js",
  "scripts": {
package/src/index.js CHANGED
@@ -119,15 +119,15 @@ Examples:
  $ openkbs deploy contentRender
  `);

- program
- .command('sign')
- .description('Signs a transaction to request OpenKBS service')
- .requiredOption('-a, --toAccountId <toAccountId>', 'Receiver account ID')
- .option('-e, --expires <expiresInSeconds>', 'Expiration time in seconds', '60')
- .option('-m, --maxAmount <maxAmount>', 'Maximum authorized charge', '300000')
- .option('-r, --resourceId <resourceId>', 'Resource ID', 'credits')
- .option('-p, --payload <payload>', 'Payload')
- .action(signAction);
+ // program
+ // .command('sign')
+ // .description('Signs a transaction to request OpenKBS service')
+ // .requiredOption('-a, --toAccountId <toAccountId>', 'Receiver account ID')
+ // .option('-e, --expires <expiresInSeconds>', 'Expiration time in seconds', '60')
+ // .option('-m, --maxAmount <maxAmount>', 'Maximum authorized charge', '300000')
+ // .option('-r, --resourceId <resourceId>', 'Resource ID', 'credits')
+ // .option('-p, --payload <payload>', 'Payload')
+ // .action(signAction);

  // Set up the CLI program
  program
@@ -141,13 +141,4 @@ program
  .description('Log out from OpenKBS by deleting the locally stored session token.')
  .action(logoutAction);

- // program
- // .command('evolve <featureDescription>')
- // .description('Evolve the application by providing additional feature requirements before deployment.')
- // .action(evolveApplication)
- // .addHelpText('after', `
- // Examples:
- // $ openkbs evolve "Add water tracking feature"
- // `);
-
  program.parse(process.argv);