@memori.ai/memori-api-client 0.11.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/types.d.ts CHANGED
@@ -123,25 +123,29 @@ export declare type User = {
123
123
  admin?: boolean;
124
124
  superAdmin?: boolean;
125
125
  verificationCode?: string;
126
+ dontSendInvitationEmail?: boolean;
126
127
  flowID?: string;
127
128
  newsletterSubscribed?: boolean;
128
129
  maxMemori?: number;
129
- maxCompletions?: number;
130
+ numMemori?: number;
130
131
  canCreateMemori?: boolean;
131
132
  canAccessAPI?: boolean;
132
133
  canRunSnippets?: boolean;
133
134
  canEditIntegrations?: boolean;
134
135
  canEditDynamicIntents?: boolean;
135
136
  canEditMemoriChaining?: boolean;
137
+ monthSessions?: number;
138
+ monthValidSessions?: number;
136
139
  maxFreeSessions?: number;
137
140
  nonFreeSessionCost?: number;
141
+ monthCompletions?: number;
142
+ maxCompletions?: number;
138
143
  creationTimestamp?: string;
139
144
  lastChangeTimestamp?: string;
140
145
  referral?: string;
141
146
  couponCode?: string;
142
147
  paying?: boolean;
143
148
  notificationPrefs?: NotificationPrefs[];
144
- dontSendInvitationEmail?: boolean;
145
149
  };
146
150
  export declare type IntegrationResource = {
147
151
  name: string;
package/package.json CHANGED
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "0.11.0",
2
+ "version": "1.1.0",
3
3
  "name": "@memori.ai/memori-api-client",
4
4
  "description": "React library to integrate a Memori in your app or website",
5
5
  "license": "Apache-2.0",
@@ -126,7 +126,6 @@
126
126
  "typescript": "4.7.4"
127
127
  },
128
128
  "dependencies": {
129
- "cross-fetch": "^3.1.5",
130
- "microsoft-cognitiveservices-speech-sdk": "1.20.0"
129
+ "cross-fetch": "^3.1.5"
131
130
  }
132
131
  }
@@ -58,12 +58,62 @@ export default (apiUrl: string) => ({
58
58
  }
59
59
  >,
60
60
 
61
+ /**
62
+ * Gets a list of Memori objects owned by the user with the specified ID.
63
+ * @param {string} tenantName - The name of the tenant
64
+ * @param {string} userID - The user ID
65
+ * @param {string=} authToken - The login token
66
+ * @returns A list of Memori objects
67
+ */
68
+ getUserByIDMemoriList: (
69
+ tenantName: string,
70
+ userID: string,
71
+ authToken?: string
72
+ ) =>
73
+ apiFetcher(
74
+ `/UserMemoriByID/${tenantName}/${userID}${
75
+ authToken ? `/${authToken}` : ''
76
+ }`,
77
+ {
78
+ apiUrl,
79
+ }
80
+ ) as Promise<
81
+ ResponseSpec & {
82
+ memori: Memori[];
83
+ }
84
+ >,
85
+
86
+ /**
87
+ * Gets a list of Memori objects owned by the specified user.
88
+ * @param {string} tenantName - The name of the tenant
89
+ * @param {string} userName - The user name
90
+ * @param {string=} authToken - The login token
91
+ * @returns A list of Memori objects
92
+ */
93
+ getUserMemoriList: (
94
+ tenantName: string,
95
+ userName: string,
96
+ authToken?: string
97
+ ) =>
98
+ apiFetcher(
99
+ `/UserMemori/${tenantName}/${userName}${
100
+ authToken ? `/${authToken}` : ''
101
+ }`,
102
+ {
103
+ apiUrl,
104
+ }
105
+ ) as Promise<
106
+ ResponseSpec & {
107
+ memori: Memori[];
108
+ }
109
+ >,
110
+
61
111
  /**
62
112
  * Gets a list of Memori objects for the currently logged in User.
63
113
  * @param authToken - The login token
64
114
  * @returns A list of Memori objects
65
115
  */
66
- getUserMemoriList: (authToken: string) =>
116
+ getMemoriList: (authToken: string) =>
67
117
  apiFetcher(`/Memori/${authToken}`, {
68
118
  apiUrl,
69
119
  }) as Promise<
@@ -7,7 +7,7 @@ export default (apiUrl: string) => ({
7
7
  * @param user - The user object
8
8
  * @returns The created user object
9
9
  */
10
- userSignIn: (user: User) =>
10
+ userSignUp: (user: User) =>
11
11
  apiFetcher('/User', {
12
12
  apiUrl,
13
13
  body: user,
@@ -19,7 +19,7 @@ export default (apiUrl: string) => ({
19
19
  * @param user - The user object
20
20
  * @returns The created user object
21
21
  */
22
- userConfirmSignIn: (user: User) =>
22
+ userConfirmSignUp: (user: User) =>
23
23
  apiFetcher('/UserConfirm', {
24
24
  apiUrl,
25
25
  body: user,
@@ -1,6 +1,5 @@
1
1
  export const getApiUrl = (hostname?: string) =>
2
2
  hostname
3
- ? new URL(
4
- hostname.startsWith('http') ? hostname : `https://${hostname}`
5
- ).origin.replace('http://', 'https://')
3
+ ? new URL(hostname.startsWith('http') ? hostname : `https://${hostname}`)
4
+ .origin
6
5
  : 'https://backend.memori.ai';
package/src/index.ts CHANGED
@@ -2,7 +2,6 @@ import { getApiUrl } from './helpers/getApiUrl';
2
2
  import backend from './backend';
3
3
  import engine from './engine';
4
4
  import * as constants from './constants';
5
- import speech from './speech';
6
5
  import asset from './helpers/asset';
7
6
 
8
7
  const api = (hostname?: string) => {
@@ -11,7 +10,6 @@ const api = (hostname?: string) => {
11
10
  return {
12
11
  backend: backend(`${apiUrl}/api/v2`),
13
12
  ...engine(`${apiUrl}/memori/v2`),
14
- speech,
15
13
  constants,
16
14
  asset: asset(`${apiUrl}/api/v2`),
17
15
  };
package/src/types.ts CHANGED
@@ -125,25 +125,29 @@ export declare type User = {
125
125
  admin?: boolean;
126
126
  superAdmin?: boolean;
127
127
  verificationCode?: string;
128
+ dontSendInvitationEmail?: boolean;
128
129
  flowID?: string;
129
130
  newsletterSubscribed?: boolean;
130
131
  maxMemori?: number;
131
- maxCompletions?: number;
132
+ numMemori?: number;
132
133
  canCreateMemori?: boolean;
133
134
  canAccessAPI?: boolean;
134
135
  canRunSnippets?: boolean;
135
136
  canEditIntegrations?: boolean;
136
137
  canEditDynamicIntents?: boolean;
137
138
  canEditMemoriChaining?: boolean;
139
+ monthSessions?: number;
140
+ monthValidSessions?: number;
138
141
  maxFreeSessions?: number;
139
142
  nonFreeSessionCost?: number;
143
+ monthCompletions?: number;
144
+ maxCompletions?: number;
140
145
  creationTimestamp?: string;
141
146
  lastChangeTimestamp?: string;
142
147
  referral?: string;
143
148
  couponCode?: string;
144
149
  paying?: boolean;
145
150
  notificationPrefs?: NotificationPrefs[];
146
- dontSendInvitationEmail?: boolean;
147
151
  };
148
152
 
149
153
  export declare type IntegrationResource = {
package/dist/speech.d.ts DELETED
@@ -1,13 +0,0 @@
1
- import * as speechSdk from 'microsoft-cognitiveservices-speech-sdk';
2
- /**
3
- * EXPERIMENTAL
4
- */
5
- declare const speech: (AZURE_COGNITIVE_SERVICES_TTS_KEY: string, DEBUG?: boolean) => (lang: string, voiceType: 'FEMALE' | 'MALE') => {
6
- speak: (text: string, onAudioEnd?: ((sender: speechSdk.IPlayer) => void) | undefined) => void;
7
- isSpeaking: () => boolean;
8
- stopSpeaking: () => void;
9
- recognize: (onRecognized: (transcript: string) => void) => void;
10
- isRecognizing: () => boolean;
11
- stopRecognizing: (onStop?: (() => void) | undefined) => void;
12
- };
13
- export default speech;
package/src/speech.ts DELETED
@@ -1,243 +0,0 @@
1
- import * as speechSdk from 'microsoft-cognitiveservices-speech-sdk';
2
-
3
- const getTTSVoice = (lang: string, voiceType: 'MALE' | 'FEMALE'): string => {
4
- let voice = '';
5
- let voiceLang = lang.toUpperCase();
6
- switch (voiceLang) {
7
- case 'IT':
8
- voice = `${
9
- voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-ElsaNeural'
10
- }`;
11
- break;
12
- case 'DE':
13
- voice = `${
14
- voiceType === 'MALE' ? 'de-DE-ConradNeural' : 'de-DE-KatjaNeural'
15
- }`;
16
- break;
17
- case 'EN':
18
- voice = `${
19
- voiceType === 'MALE' ? 'en-GB-RyanNeural' : 'en-GB-SoniaNeural'
20
- }`;
21
- break;
22
- case 'ES':
23
- voice = `${
24
- voiceType === 'MALE' ? 'es-ES-AlvaroNeural' : 'es-ES-ElviraNeural'
25
- }`;
26
- break;
27
- case 'FR':
28
- voice = `${
29
- voiceType === 'MALE' ? 'fr-FR-HenriNeural' : 'fr-FR-DeniseNeural'
30
- }`;
31
- break;
32
- case 'PT':
33
- voice = `${
34
- voiceType === 'MALE' ? 'pt-PT-DuarteNeural' : 'pt-PT-RaquelNeural'
35
- }`;
36
- break;
37
- default:
38
- voice = `${
39
- voiceType === 'MALE' ? 'it-IT-DiegoNeural' : 'it-IT-IsabellaNeural'
40
- }`;
41
- break;
42
- }
43
- return voice;
44
- };
45
-
46
- const getCultureCodeByLanguage = (lang: string): string => {
47
- let voice = '';
48
- let voiceLang = lang.toUpperCase();
49
- switch (voiceLang) {
50
- case 'IT':
51
- voice = 'it-IT';
52
- break;
53
- case 'DE':
54
- voice = 'de-DE';
55
- break;
56
- case 'EN':
57
- voice = 'en-US';
58
- break;
59
- case 'ES':
60
- voice = 'es-ES';
61
- break;
62
- case 'FR':
63
- voice = 'fr-FR';
64
- break;
65
- case 'PT':
66
- voice = 'pt-PT';
67
- break;
68
- default:
69
- voice = 'it-IT';
70
- break;
71
- }
72
- return voice;
73
- };
74
-
75
- /**
76
- * EXPERIMENTAL
77
- */
78
- const speech = (AZURE_COGNITIVE_SERVICES_TTS_KEY: string, DEBUG = false) => (
79
- lang: string,
80
- voiceType: 'FEMALE' | 'MALE'
81
- ) => {
82
- let speechConfig: speechSdk.SpeechConfig = speechSdk.SpeechConfig.fromSubscription(
83
- AZURE_COGNITIVE_SERVICES_TTS_KEY,
84
- 'eastus'
85
- );
86
- let speechSynthesizer: speechSdk.SpeechSynthesizer | null;
87
- let audioDestination: speechSdk.SpeakerAudioDestination;
88
-
89
- audioDestination = new speechSdk.SpeakerAudioDestination();
90
- let audioOutputConfig = speechSdk.AudioConfig.fromSpeakerOutput(
91
- audioDestination
92
- );
93
-
94
- // https://docs.microsoft.com/it-it/azure/cognitive-services/speech-service/language-support#text-to-speech
95
- speechConfig.speechSynthesisVoiceName = getTTSVoice(lang, voiceType);
96
-
97
- let langCultureCode = getCultureCodeByLanguage(lang);
98
- speechConfig.speechSynthesisLanguage = langCultureCode;
99
- speechConfig.speechRecognitionLanguage = langCultureCode;
100
-
101
- /**
102
- * speak
103
- * @description Speaks the text using the speech synthesizer. (TTS)
104
- * @param {string} text - The text to be synthesized.
105
- * @param {func=} onAudioEnd - The callback to be invoked when the synthesized audio is finished.
106
- */
107
- const speak = (
108
- text: string,
109
- onAudioEnd?: (sender: speechSdk.IPlayer) => void
110
- ) => {
111
- stopSpeaking();
112
-
113
- speechSynthesizer = new speechSdk.SpeechSynthesizer(
114
- speechConfig,
115
- audioOutputConfig
116
- );
117
-
118
- if (onAudioEnd) audioDestination.onAudioEnd = onAudioEnd;
119
-
120
- speechSynthesizer.speakTextAsync(
121
- text,
122
- result => {
123
- if (result) {
124
- try {
125
- if (DEBUG) console.log('speak result', result);
126
- if (speechSynthesizer) {
127
- speechSynthesizer.close();
128
- speechSynthesizer = null;
129
- }
130
- } catch (e) {
131
- console.error('speak error: ', e);
132
- window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
133
- }
134
- } else if (DEBUG) {
135
- console.log('speak no result', result);
136
- }
137
- },
138
- error => {
139
- console.error('speak:', error);
140
- window.speechSynthesis.speak(new SpeechSynthesisUtterance(text));
141
- }
142
- );
143
- };
144
-
145
- /**
146
- * isSpeaking
147
- * @description Returns true if the synthesizer is speaking.
148
- * @returns {boolean}
149
- */
150
- const isSpeaking = (): boolean => {
151
- return !!speechSynthesizer;
152
- };
153
-
154
- /**
155
- * stopSpeaking
156
- * @description Stops the speech synthesizer if it is synthesizing.
157
- */
158
- const stopSpeaking = () => {
159
- if (audioDestination) audioDestination.pause();
160
- if (speechSynthesizer) {
161
- speechSynthesizer.close();
162
- speechSynthesizer = null;
163
- }
164
- };
165
-
166
- const audioInputConfig = speechSdk.AudioConfig.fromDefaultMicrophoneInput();
167
- let recognizer: speechSdk.SpeechRecognizer | null;
168
-
169
- /**
170
- * recognize
171
- * @description Starts the speech recognition.
172
- * @param {func=} onRecognized - Callback method invoked when the speech is recognized with the text.
173
- */
174
- const recognize = (onRecognized: (transcript: string) => void) => {
175
- recognizer = new speechSdk.SpeechRecognizer(speechConfig, audioInputConfig);
176
-
177
- recognizer.recognizing = (_s, e) => {
178
- if (DEBUG) console.log(`RECOGNIZING: Text=${e.result.text}`);
179
- };
180
- recognizer.recognized = (_s, e) => {
181
- if (e.result.reason === speechSdk.ResultReason.RecognizedSpeech) {
182
- if (DEBUG) console.log(`RECOGNIZED: Text=${e.result.text}`);
183
- onRecognized(e.result.text ?? '');
184
- } else if (e.result.reason === speechSdk.ResultReason.NoMatch && DEBUG) {
185
- console.log('NOMATCH: Speech could not be recognized.');
186
- }
187
- };
188
- recognizer.canceled = (_s, e) => {
189
- if (DEBUG) console.log(`CANCELED: Reason=${e.reason}`);
190
-
191
- if (e.reason === speechSdk.CancellationReason.Error && DEBUG) {
192
- console.log(`"CANCELED: ErrorCode=${e.errorCode}`);
193
- console.log(`"CANCELED: ErrorDetails=${e.errorDetails}`);
194
- console.log(
195
- 'CANCELED: Did you set the speech resource key and region values?'
196
- );
197
- }
198
-
199
- stopRecognizing();
200
- };
201
-
202
- recognizer.sessionStopped = (_s, _e) => {
203
- if (DEBUG) console.log('\n Session stopped event.');
204
- if (recognizer) recognizer.stopContinuousRecognitionAsync();
205
- };
206
- recognizer.startContinuousRecognitionAsync();
207
- };
208
-
209
- /**
210
- * isRecognizing
211
- * @description Returns true if the recognizer is recognizing.
212
- * @returns {boolean}
213
- */
214
- const isRecognizing = (): boolean => {
215
- return !!recognizer;
216
- };
217
-
218
- /**
219
- * stopRecognizing
220
- * @description Stops the speech recognizer if it is recognizing.
221
- * @param {func=} onStop - (optional) The callback to be invoked when the speech recognition is stopped.
222
- */
223
- const stopRecognizing = (onStop?: () => void) => {
224
- if (recognizer) {
225
- recognizer.stopContinuousRecognitionAsync();
226
- recognizer.close();
227
- recognizer = null;
228
-
229
- if (onStop) onStop();
230
- }
231
- };
232
-
233
- return {
234
- speak,
235
- isSpeaking,
236
- stopSpeaking,
237
- recognize,
238
- isRecognizing,
239
- stopRecognizing,
240
- };
241
- };
242
-
243
- export default speech;