@push.rocks/smartai 0.2.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist_ts/00_commitinfo_data.js CHANGED
@@ -3,7 +3,7 @@
   */
  export const commitinfo = {
    name: '@push.rocks/smartai',
-   version: '0.2.0',
+   version: '0.3.2',
    description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
  };
package/dist_ts/provider.xai.d.ts ADDED
@@ -0,0 +1,43 @@
+ import * as plugins from './plugins.js';
+ import { MultiModalModel } from './abstract.classes.multimodal.js';
+ export interface IXAIProviderOptions {
+   xaiToken: string;
+ }
+ export declare class XAIProvider extends MultiModalModel {
+   private options;
+   openAiApiClient: plugins.openai.default;
+   smartpdfInstance: plugins.smartpdf.SmartPdf;
+   constructor(optionsArg: IXAIProviderOptions);
+   start(): Promise<void>;
+   stop(): Promise<void>;
+   chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>>;
+   chat(optionsArg: {
+     systemMessage: string;
+     userMessage: string;
+     messageHistory: {
+       role: string;
+       content: string;
+     }[];
+   }): Promise<{
+     role: 'assistant';
+     message: string;
+   }>;
+   audio(optionsArg: {
+     message: string;
+   }): Promise<NodeJS.ReadableStream>;
+   vision(optionsArg: {
+     image: Buffer;
+     prompt: string;
+   }): Promise<string>;
+   document(optionsArg: {
+     systemMessage: string;
+     userMessage: string;
+     pdfDocuments: Uint8Array[];
+     messageHistory: {
+       role: string;
+       content: string;
+     }[];
+   }): Promise<{
+     message: any;
+   }>;
+ }
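The declaration above is the provider's entire public surface. For orientation, here is a minimal sketch of driving the provider directly; the import path from the package root is an assumption, while the option name and every method signature come straight from the declaration file:

```typescript
import { XAIProvider } from '@push.rocks/smartai'; // assumed export path

const xai = new XAIProvider({ xaiToken: process.env.XAI_TOKEN! });
await xai.start(); // wires up the OpenAI-compatible client against https://api.x.ai/v1

const reply = await xai.chat({
  systemMessage: 'You are a concise assistant.',
  userMessage: 'What is the capital of France?',
  messageHistory: [], // prior { role, content } turns, if any
});
console.log(reply.role, reply.message); // role is always 'assistant'

await xai.stop();
```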
package/dist_ts/provider.xai.js ADDED
@@ -0,0 +1,141 @@
+ import * as plugins from './plugins.js';
+ import * as paths from './paths.js';
+ import { MultiModalModel } from './abstract.classes.multimodal.js';
+ export class XAIProvider extends MultiModalModel {
+   constructor(optionsArg) {
+     super();
+     this.options = optionsArg;
+   }
+   async start() {
+     this.openAiApiClient = new plugins.openai.default({
+       apiKey: this.options.xaiToken,
+       baseURL: 'https://api.x.ai/v1',
+     });
+     this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
+   }
+   async stop() { }
+   async chatStream(input) {
+     // Create a TextDecoder to handle incoming chunks
+     const decoder = new TextDecoder();
+     let buffer = '';
+     let currentMessage = null;
+     // Create a TransformStream to process the input
+     const transform = new TransformStream({
+       async transform(chunk, controller) {
+         buffer += decoder.decode(chunk, { stream: true });
+         // Try to parse complete JSON messages from the buffer
+         while (true) {
+           const newlineIndex = buffer.indexOf('\n');
+           if (newlineIndex === -1)
+             break;
+           const line = buffer.slice(0, newlineIndex);
+           buffer = buffer.slice(newlineIndex + 1);
+           if (line.trim()) {
+             try {
+               const message = JSON.parse(line);
+               currentMessage = {
+                 role: message.role || 'user',
+                 content: message.content || '',
+               };
+             }
+             catch (e) {
+               console.error('Failed to parse message:', e);
+             }
+           }
+         }
+         // If we have a complete message, send it to X.AI
+         if (currentMessage) {
+           const stream = await this.openAiApiClient.chat.completions.create({
+             model: 'grok-2-latest',
+             messages: [{ role: currentMessage.role, content: currentMessage.content }],
+             stream: true,
+           });
+           // Process each chunk from X.AI
+           for await (const chunk of stream) {
+             const content = chunk.choices[0]?.delta?.content;
+             if (content) {
+               controller.enqueue(content);
+             }
+           }
+           currentMessage = null;
+         }
+       },
+       flush(controller) {
+         if (buffer) {
+           try {
+             const message = JSON.parse(buffer);
+             controller.enqueue(message.content || '');
+           }
+           catch (e) {
+             console.error('Failed to parse remaining buffer:', e);
+           }
+         }
+       }
+     });
+     // Connect the input to our transform stream
+     return input.pipeThrough(transform);
+   }
+   async chat(optionsArg) {
+     // Prepare messages array with system message, history, and user message
+     const messages = [
+       { role: 'system', content: optionsArg.systemMessage },
+       ...optionsArg.messageHistory.map(msg => ({
+         role: msg.role,
+         content: msg.content
+       })),
+       { role: 'user', content: optionsArg.userMessage }
+     ];
+     // Call X.AI's chat completion API
+     const completion = await this.openAiApiClient.chat.completions.create({
+       model: 'grok-2-latest',
+       messages: messages,
+       stream: false,
+     });
+     // Return the assistant's response
+     return {
+       role: 'assistant',
+       message: completion.choices[0]?.message?.content || ''
+     };
+   }
+   async audio(optionsArg) {
+     throw new Error('Audio generation is not supported by X.AI');
+   }
+   async vision(optionsArg) {
+     throw new Error('Vision tasks are not supported by X.AI');
+   }
+   async document(optionsArg) {
+     // First convert PDF documents to images
+     let pdfDocumentImageBytesArray = [];
+     for (const pdfDocument of optionsArg.pdfDocuments) {
+       const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+       pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
+     }
+     // Convert images to base64 for inclusion in the message
+     const imageBase64Array = pdfDocumentImageBytesArray.map(bytes => Buffer.from(bytes).toString('base64'));
+     // Combine document images into the user message
+     const enhancedUserMessage = `
+       ${optionsArg.userMessage}
+
+       Document contents (as images):
+       ${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
+     `;
+     // Use chat completion to analyze the documents
+     const messages = [
+       { role: 'system', content: optionsArg.systemMessage },
+       ...optionsArg.messageHistory.map(msg => ({
+         role: msg.role,
+         content: msg.content
+       })),
+       { role: 'user', content: enhancedUserMessage }
+     ];
+     const completion = await this.openAiApiClient.chat.completions.create({
+       model: 'grok-2-latest',
+       messages: messages,
+       stream: false,
+     });
+     return {
+       message: completion.choices[0]?.message?.content || ''
+     };
+   }
+ }
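The `chatStream` implementation above defines a small wire protocol: the input stream carries newline-delimited JSON objects of the shape `{ role, content }`, and the returned stream emits plain-text deltas from the completion. One caveat worth noting: `transform` is a method of the transformer object handed to `TransformStream`, so the `this.openAiApiClient` reference inside it does not obviously resolve to the provider instance as written. A minimal sketch of feeding the stream, assuming a started `XAIProvider` named `provider` (as in the sketch above):

```typescript
// Build an identity stream pair: write NDJSON bytes in, hand the readable side to chatStream().
const { writable, readable } = new TransformStream<Uint8Array, Uint8Array>();
const writer = writable.getWriter();

await writer.write(
  new TextEncoder().encode(JSON.stringify({ role: 'user', content: 'Hello, Grok!' }) + '\n')
);
await writer.close(); // closing the input lets the transform's flush() drain any leftover buffer

const reader = (await provider.chatStream(readable)).getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value); // each chunk is a text delta from grok-2-latest
}
```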
package/license ADDED
@@ -0,0 +1,19 @@
+ Copyright (c) 2024 Task Venture Capital GmbH (hello@task.vc)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@push.rocks/smartai",
-   "version": "0.2.0",
+   "version": "0.3.2",
    "private": false,
    "description": "A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.",
    "main": "dist_ts/index.js",
package/readme.md CHANGED
@@ -1,80 +1,125 @@
  # @push.rocks/smartai

- Provides a standardized interface for integrating and conversing with multiple AI models, supporting operations like chat, streaming interactions, and audio responses.
-
- ## Install
-
- To add @push.rocks/smartai to your project, run the following command in your terminal:
+ [![npm version](https://badge.fury.io/js/%40push.rocks%2Fsmartai.svg)](https://www.npmjs.com/package/@push.rocks/smartai)
+
+ SmartAi is a comprehensive TypeScript library that provides a standardized interface for integrating and interacting with multiple AI models. It supports a range of operations from synchronous and streaming chat to audio generation, document processing, and vision tasks.
+
+ ## Table of Contents
+
+ - [Features](#features)
+ - [Installation](#installation)
+ - [Supported AI Providers](#supported-ai-providers)
+ - [Quick Start](#quick-start)
+ - [Usage Examples](#usage-examples)
+   - [Chat Interactions](#chat-interactions)
+   - [Streaming Chat](#streaming-chat)
+   - [Audio Generation](#audio-generation)
+   - [Document Processing](#document-processing)
+   - [Vision Processing](#vision-processing)
+ - [Error Handling](#error-handling)
+ - [Development](#development)
+   - [Running Tests](#running-tests)
+   - [Building the Project](#building-the-project)
+ - [Contributing](#contributing)
+ - [License](#license)
+ - [Legal Information](#legal-information)
+
+ ## Features
+
+ - **Unified API:** Seamlessly integrate multiple AI providers with a consistent interface.
+ - **Chat & Streaming:** Support for both synchronous and real-time streaming chat interactions.
+ - **Audio & Vision:** Generate audio responses and perform detailed image analysis.
+ - **Document Processing:** Analyze PDFs and other documents using vision models.
+ - **Extensible:** Easily extend the library to support additional AI providers.
+
+ ## Installation
+
+ To install SmartAi, run the following command:

  ```bash
  npm install @push.rocks/smartai
  ```

- This command installs the package and adds it to your project's dependencies.
+ This will add the package to your project's dependencies.

  ## Supported AI Providers

- @push.rocks/smartai supports multiple AI providers, each with its own unique capabilities:
+ SmartAi supports multiple AI providers. Configure each provider with its corresponding token or settings:

  ### OpenAI
- - Models: GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
- - Features: Chat, Streaming, Audio Generation, Vision, Document Processing
- - Configuration:
+
+ - **Models:** GPT-4, GPT-3.5-turbo, GPT-4-vision-preview
+ - **Features:** Chat, Streaming, Audio Generation, Vision, Document Processing
+ - **Configuration Example:**
+
  ```typescript
  openaiToken: 'your-openai-token'
  ```

+ ### X.AI
+
+ - **Models:** Grok-2-latest
+ - **Features:** Chat, Streaming, Document Processing
+ - **Configuration Example:**
+
+ ```typescript
+ xaiToken: 'your-xai-token'
+ ```
+
  ### Anthropic
- - Models: Claude-3-opus-20240229
- - Features: Chat, Streaming, Vision, Document Processing
- - Configuration:
+
+ - **Models:** Claude-3-opus-20240229
+ - **Features:** Chat, Streaming, Vision, Document Processing
+ - **Configuration Example:**
+
  ```typescript
  anthropicToken: 'your-anthropic-token'
  ```

  ### Perplexity
- - Models: Mixtral-8x7b-instruct
- - Features: Chat, Streaming
- - Configuration:
+
+ - **Models:** Mixtral-8x7b-instruct
+ - **Features:** Chat, Streaming
+ - **Configuration Example:**
+
  ```typescript
  perplexityToken: 'your-perplexity-token'
  ```

  ### Groq
- - Models: Llama-3.3-70b-versatile
- - Features: Chat, Streaming
- - Configuration:
+
+ - **Models:** Llama-3.3-70b-versatile
+ - **Features:** Chat, Streaming
+ - **Configuration Example:**
+
  ```typescript
  groqToken: 'your-groq-token'
  ```

  ### Ollama
- - Models: Configurable (default: llama2, llava for vision/documents)
- - Features: Chat, Streaming, Vision, Document Processing
- - Configuration:
+
+ - **Models:** Configurable (default: llama2; use llava for vision/document tasks)
+ - **Features:** Chat, Streaming, Vision, Document Processing
+ - **Configuration Example:**
+
  ```typescript
- baseUrl: 'http://localhost:11434' // Optional
- model: 'llama2' // Optional
- visionModel: 'llava' // Optional, for vision and document tasks
+ ollama: {
+   baseUrl: 'http://localhost:11434', // Optional
+   model: 'llama2', // Optional
+   visionModel: 'llava' // Optional, for vision and document tasks
+ }
  ```

- ## Usage
-
- The `@push.rocks/smartai` package is a comprehensive solution for integrating and interacting with various AI models, designed to support operations ranging from chat interactions to audio responses. This documentation will guide you through the process of utilizing `@push.rocks/smartai` in your applications.
-
- ### Getting Started
-
- Before you begin, ensure you have installed the package as described in the **Install** section above. Once installed, you can start integrating AI functionalities into your application.
-
- ### Initializing SmartAi
+ ## Quick Start

- The first step is to import and initialize the `SmartAi` class with appropriate options for the AI services you plan to use:
+ Initialize SmartAi with the provider configurations you plan to use:

  ```typescript
  import { SmartAi } from '@push.rocks/smartai';

  const smartAi = new SmartAi({
    openaiToken: 'your-openai-token',
+   xaiToken: 'your-xai-token',
    anthropicToken: 'your-anthropic-token',
    perplexityToken: 'your-perplexity-token',
    groqToken: 'your-groq-token',
@@ -87,35 +132,34 @@ const smartAi = new SmartAi({
  await smartAi.start();
  ```

- ### Chat Interactions
+ ## Usage Examples

- #### Synchronous Chat
+ ### Chat Interactions

- For simple question-answer interactions:
+ **Synchronous Chat:**

  ```typescript
  const response = await smartAi.openaiProvider.chat({
    systemMessage: 'You are a helpful assistant.',
    userMessage: 'What is the capital of France?',
-   messageHistory: [] // Previous messages in the conversation
+   messageHistory: [] // Include previous conversation messages if applicable
  });

  console.log(response.message);
  ```

- #### Streaming Chat
+ ### Streaming Chat

- For real-time, streaming interactions:
+ **Real-Time Streaming:**

  ```typescript
  const textEncoder = new TextEncoder();
  const textDecoder = new TextDecoder();

- // Create input and output streams
+ // Create a transform stream for sending and receiving data
  const { writable, readable } = new TransformStream();
  const writer = writable.getWriter();

- // Send a message
  const message = {
    role: 'user',
    content: 'Tell me a story about a brave knight'
@@ -123,91 +167,92 @@ const message = {

  writer.write(textEncoder.encode(JSON.stringify(message) + '\n'));

- // Process the response stream
+ // Start streaming the response
  const stream = await smartAi.openaiProvider.chatStream(readable);
  const reader = stream.getReader();

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
-   console.log('AI:', value); // Process each chunk of the response
+   console.log('AI:', value);
  }
  ```

  ### Audio Generation

- For providers that support audio generation (currently OpenAI):
+ Generate audio (supported by providers like OpenAI):

  ```typescript
  const audioStream = await smartAi.openaiProvider.audio({
    message: 'Hello, this is a test of text-to-speech'
  });

- // Handle the audio stream (e.g., save to file or play)
+ // Process the audio stream, for example, play it or save it to a file.
  ```

  ### Document Processing

- For providers that support document processing (OpenAI, Ollama, and Anthropic):
+ Analyze and extract key information from documents:

  ```typescript
- // Using OpenAI
- const result = await smartAi.openaiProvider.document({
+ // Example using OpenAI
+ const documentResult = await smartAi.openaiProvider.document({
    systemMessage: 'Classify the document type',
    userMessage: 'What type of document is this?',
    messageHistory: [],
-   pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
+   pdfDocuments: [pdfBuffer] // Uint8Array containing the PDF content
  });
+ ```

- // Using Ollama with llava
- const analysis = await smartAi.ollamaProvider.document({
+ Other providers (e.g., Ollama and Anthropic) follow a similar pattern:
+
+ ```typescript
+ // Using Ollama for document processing
+ const ollamaResult = await smartAi.ollamaProvider.document({
    systemMessage: 'You are a document analysis assistant',
-   userMessage: 'Extract the key information from this document',
+   userMessage: 'Extract key information from this document',
    messageHistory: [],
-   pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
+   pdfDocuments: [pdfBuffer]
  });
+ ```

- // Using Anthropic with Claude 3
- const anthropicAnalysis = await smartAi.anthropicProvider.document({
-   systemMessage: 'You are a document analysis assistant',
-   userMessage: 'Please analyze this document and extract key information',
+ ```typescript
+ // Using Anthropic for document processing
+ const anthropicResult = await smartAi.anthropicProvider.document({
+   systemMessage: 'Analyze the document',
+   userMessage: 'Please extract the main points',
    messageHistory: [],
-   pdfDocuments: [pdfBuffer] // Uint8Array of PDF content
+   pdfDocuments: [pdfBuffer]
  });
  ```

- Both providers will:
- 1. Convert PDF documents to images
- 2. Process each page using their vision models
- 3. Return a comprehensive analysis based on the system message and user query
-
  ### Vision Processing

- For providers that support vision tasks (OpenAI, Ollama, and Anthropic):
+ Analyze images with vision capabilities:

  ```typescript
- // Using OpenAI's GPT-4 Vision
- const description = await smartAi.openaiProvider.vision({
-   image: imageBuffer, // Buffer containing the image data
+ // Using OpenAI GPT-4 Vision
+ const imageDescription = await smartAi.openaiProvider.vision({
+   image: imageBuffer, // Buffer containing image data
    prompt: 'What do you see in this image?'
  });

- // Using Ollama's Llava model
- const analysis = await smartAi.ollamaProvider.vision({
+ // Using Ollama for vision tasks
+ const ollamaImageAnalysis = await smartAi.ollamaProvider.vision({
    image: imageBuffer,
    prompt: 'Analyze this image in detail'
  });

- // Using Anthropic's Claude 3
- const anthropicAnalysis = await smartAi.anthropicProvider.vision({
+ // Using Anthropic for vision analysis
+ const anthropicImageAnalysis = await smartAi.anthropicProvider.vision({
    image: imageBuffer,
-   prompt: 'Please analyze this image and describe what you see'
+   prompt: 'Describe the contents of this image'
  });
  ```

  ## Error Handling

- All providers implement proper error handling. It's recommended to wrap API calls in try-catch blocks:
+ Always wrap API calls in try-catch blocks to manage errors effectively:

  ```typescript
  try {
@@ -216,26 +261,71 @@ try {
    userMessage: 'Hello!',
    messageHistory: []
  });
- } catch (error) {
+   console.log(response.message);
+ } catch (error: any) {
    console.error('AI provider error:', error.message);
  }
  ```

- ## License and Legal Information
+ ## Development

- This repository contains open-source code that is licensed under the MIT License. A copy of the MIT License can be found in the [license](license) file within this repository.
+ ### Running Tests

- **Please note:** The MIT License does not grant permission to use the trade names, trademarks, service marks, or product names of the project, except as required for reasonable and customary use in describing the origin of the work and reproducing the content of the NOTICE file.
+ To run the test suite, use the following command:
+
+ ```bash
+ npm run test
+ ```
+
+ Ensure your environment is configured with the appropriate tokens and settings for the providers you are testing.
+
+ ### Building the Project
+
+ Compile the TypeScript code and build the package using:
+
+ ```bash
+ npm run build
+ ```
+
+ This command prepares the library for distribution.
+
+ ## Contributing
+
+ Contributions are welcome! Please follow these steps:
+
+ 1. Fork the repository.
+ 2. Create a feature branch:
+    ```bash
+    git checkout -b feature/my-feature
+    ```
+ 3. Commit your changes with clear messages:
+    ```bash
+    git commit -m 'Add new feature'
+    ```
+ 4. Push your branch to your fork:
+    ```bash
+    git push origin feature/my-feature
+    ```
+ 5. Open a Pull Request with a detailed description of your changes.
+
+ ## License
+
+ This project is licensed under the [MIT License](LICENSE).
+
+ ## Legal Information

  ### Trademarks

- This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and any related products or services are trademarks of Task Venture Capital GmbH and are not included within the scope of the MIT license granted herein. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines, and any usage must be approved in writing by Task Venture Capital GmbH.
+ This project is owned and maintained by Task Venture Capital GmbH. The names and logos associated with Task Venture Capital GmbH and its related products or services are trademarks of Task Venture Capital GmbH and are not covered by the MIT License. Use of these trademarks must comply with Task Venture Capital GmbH's Trademark Guidelines.

  ### Company Information

  Task Venture Capital GmbH
- Registered at District court Bremen HRB 35230 HB, Germany
+ Registered at District Court Bremen HRB 35230 HB, Germany
+ Contact: hello@task.vc
+
+ By using this repository, you agree to the terms outlined in this section.

- For any legal inquiries or if you require further information, please contact us via email at hello@task.vc.
+ ---

- By using this repository, you acknowledge that you have read this section, agree to comply with its terms, and understand that the licensing of the code does not imply endorsement by Task Venture Capital GmbH of any derivative works.
+ Happy coding with SmartAi!
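The updated readme adds `xaiToken` to the Quick Start configuration but stops short of a dedicated X.AI usage example. Assuming the provider is exposed under the same naming pattern as the others (a `smartAi.xaiProvider` property, which is an assumption, since the diff does not show the `SmartAi` class itself), a chat call would look like this sketch:

```typescript
// Hypothetical: `xaiProvider` mirrors the documented openaiProvider/ollamaProvider pattern.
const xaiResponse = await smartAi.xaiProvider.chat({
  systemMessage: 'You are a helpful assistant.',
  userMessage: 'Give me three facts about the moon.',
  messageHistory: [],
});
console.log(xaiResponse.message);
```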
package/ts/00_commitinfo_data.ts CHANGED
@@ -3,6 +3,6 @@
   */
  export const commitinfo = {
    name: '@push.rocks/smartai',
-   version: '0.2.0',
+   version: '0.3.2',
    description: 'A TypeScript library for integrating and interacting with multiple AI models, offering capabilities for chat and potentially audio responses.'
  }
package/ts/provider.xai.ts ADDED
@@ -0,0 +1,183 @@
+ import * as plugins from './plugins.js';
+ import * as paths from './paths.js';
+ import { MultiModalModel } from './abstract.classes.multimodal.js';
+ import type { ChatOptions, ChatResponse, ChatMessage } from './abstract.classes.multimodal.js';
+ import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
+
+ export interface IXAIProviderOptions {
+   xaiToken: string;
+ }
+
+ export class XAIProvider extends MultiModalModel {
+   private options: IXAIProviderOptions;
+   public openAiApiClient: plugins.openai.default;
+   public smartpdfInstance: plugins.smartpdf.SmartPdf;
+
+   constructor(optionsArg: IXAIProviderOptions) {
+     super();
+     this.options = optionsArg;
+   }
+
+   public async start() {
+     this.openAiApiClient = new plugins.openai.default({
+       apiKey: this.options.xaiToken,
+       baseURL: 'https://api.x.ai/v1',
+     });
+     this.smartpdfInstance = new plugins.smartpdf.SmartPdf();
+   }
+
+   public async stop() {}
+
+   public async chatStream(input: ReadableStream<Uint8Array>): Promise<ReadableStream<string>> {
+     // Create a TextDecoder to handle incoming chunks
+     const decoder = new TextDecoder();
+     let buffer = '';
+     let currentMessage: { role: string; content: string; } | null = null;
+
+     // Create a TransformStream to process the input
+     const transform = new TransformStream<Uint8Array, string>({
+       async transform(chunk, controller) {
+         buffer += decoder.decode(chunk, { stream: true });
+
+         // Try to parse complete JSON messages from the buffer
+         while (true) {
+           const newlineIndex = buffer.indexOf('\n');
+           if (newlineIndex === -1) break;
+
+           const line = buffer.slice(0, newlineIndex);
+           buffer = buffer.slice(newlineIndex + 1);
+
+           if (line.trim()) {
+             try {
+               const message = JSON.parse(line);
+               currentMessage = {
+                 role: message.role || 'user',
+                 content: message.content || '',
+               };
+             } catch (e) {
+               console.error('Failed to parse message:', e);
+             }
+           }
+         }
+
+         // If we have a complete message, send it to X.AI
+         if (currentMessage) {
+           const stream = await this.openAiApiClient.chat.completions.create({
+             model: 'grok-2-latest',
+             messages: [{ role: currentMessage.role, content: currentMessage.content }],
+             stream: true,
+           });
+
+           // Process each chunk from X.AI
+           for await (const chunk of stream) {
+             const content = chunk.choices[0]?.delta?.content;
+             if (content) {
+               controller.enqueue(content);
+             }
+           }
+
+           currentMessage = null;
+         }
+       },
+
+       flush(controller) {
+         if (buffer) {
+           try {
+             const message = JSON.parse(buffer);
+             controller.enqueue(message.content || '');
+           } catch (e) {
+             console.error('Failed to parse remaining buffer:', e);
+           }
+         }
+       }
+     });
+
+     // Connect the input to our transform stream
+     return input.pipeThrough(transform);
+   }
+
+   public async chat(optionsArg: {
+     systemMessage: string;
+     userMessage: string;
+     messageHistory: { role: string; content: string; }[];
+   }): Promise<{ role: 'assistant'; message: string; }> {
+     // Prepare messages array with system message, history, and user message
+     const messages: ChatCompletionMessageParam[] = [
+       { role: 'system', content: optionsArg.systemMessage },
+       ...optionsArg.messageHistory.map(msg => ({
+         role: msg.role as 'system' | 'user' | 'assistant',
+         content: msg.content
+       })),
+       { role: 'user', content: optionsArg.userMessage }
+     ];
+
+     // Call X.AI's chat completion API
+     const completion = await this.openAiApiClient.chat.completions.create({
+       model: 'grok-2-latest',
+       messages: messages,
+       stream: false,
+     });
+
+     // Return the assistant's response
+     return {
+       role: 'assistant',
+       message: completion.choices[0]?.message?.content || ''
+     };
+   }
+
+   public async audio(optionsArg: { message: string }): Promise<NodeJS.ReadableStream> {
+     throw new Error('Audio generation is not supported by X.AI');
+   }
+
+   public async vision(optionsArg: { image: Buffer; prompt: string }): Promise<string> {
+     throw new Error('Vision tasks are not supported by X.AI');
+   }
+
+   public async document(optionsArg: {
+     systemMessage: string;
+     userMessage: string;
+     pdfDocuments: Uint8Array[];
+     messageHistory: { role: string; content: string; }[];
+   }): Promise<{ message: any }> {
+     // First convert PDF documents to images
+     let pdfDocumentImageBytesArray: Uint8Array[] = [];
+
+     for (const pdfDocument of optionsArg.pdfDocuments) {
+       const documentImageArray = await this.smartpdfInstance.convertPDFToPngBytes(pdfDocument);
+       pdfDocumentImageBytesArray = pdfDocumentImageBytesArray.concat(documentImageArray);
+     }
+
+     // Convert images to base64 for inclusion in the message
+     const imageBase64Array = pdfDocumentImageBytesArray.map(bytes =>
+       Buffer.from(bytes).toString('base64')
+     );
+
+     // Combine document images into the user message
+     const enhancedUserMessage = `
+       ${optionsArg.userMessage}
+
+       Document contents (as images):
+       ${imageBase64Array.map((img, i) => `Image ${i + 1}: <image data>`).join('\n')}
+     `;
+
+     // Use chat completion to analyze the documents
+     const messages: ChatCompletionMessageParam[] = [
+       { role: 'system', content: optionsArg.systemMessage },
+       ...optionsArg.messageHistory.map(msg => ({
+         role: msg.role as 'system' | 'user' | 'assistant',
+         content: msg.content
+       })),
+       { role: 'user', content: enhancedUserMessage }
+     ];
+
+     const completion = await this.openAiApiClient.chat.completions.create({
+       model: 'grok-2-latest',
+       messages: messages,
+       stream: false,
+     });
+
+     return {
+       message: completion.choices[0]?.message?.content || ''
+     };
+   }
+ }
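The `document()` method above establishes the pattern the readme describes: each PDF is rendered to PNG page images via `smartpdfInstance.convertPDFToPngBytes`, the pages are base64-encoded, and one combined prompt goes through the ordinary chat-completions endpoint. Note that the template literal interpolates the placeholder text `Image N: <image data>` rather than the base64 payloads themselves, so as written the encoded pages never actually reach the model. A calling sketch, assuming a PDF read from disk (the file name and import path are illustrative):

```typescript
import { promises as fs } from 'fs';
import { XAIProvider } from '@push.rocks/smartai'; // assumed export path

const provider = new XAIProvider({ xaiToken: process.env.XAI_TOKEN! });
await provider.start();

// document() accepts raw PDF bytes as Uint8Array values.
const pdfBytes = new Uint8Array(await fs.readFile('./invoice.pdf'));
const result = await provider.document({
  systemMessage: 'You are a document analysis assistant.',
  userMessage: 'What type of document is this?',
  pdfDocuments: [pdfBytes],
  messageHistory: [],
});
console.log(result.message); // assistant text, or '' if the call returned no content
```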