@simfinity/constellation-client 1.0.20 → 1.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -39,17 +39,20 @@ var WebClient = class {
39
39
  * @remarks
40
40
  * A session MUST exist first in order to connect the stream next.
41
41
  *
42
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
43
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
42
+ * @param params the configuration options to start this new session
44
43
  *
45
44
  * @exception
46
45
  * This method throws new Error(...) if unable to execute successfully for any reason.
47
46
  */
48
- async startSession(voiceEnabled, voiceName) {
47
+ async startSession(params) {
48
+ var _a, _b, _c;
49
49
  const prepareBody = {
50
- llmProvider: this.config.llm,
51
- audioEnabled: voiceEnabled,
52
- voiceName
50
+ llmProvider: params.llmProvider,
51
+ audioEnabled: params.voiceEnabled,
52
+ voiceName: params.voiceName,
53
+ temperature: (_a = params.behaviour) == null ? void 0 : _a.temperature,
54
+ instructions: (_b = params.behaviour) == null ? void 0 : _b.instructions,
55
+ maxResponseToken: (_c = params.behaviour) == null ? void 0 : _c.maxResponseToken
53
56
  };
54
57
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
55
58
  method: "POST",
package/dist/index.d.cts CHANGED
@@ -1,44 +1,81 @@
1
1
  /**
2
- * Available server-side LLM types
2
+ * Supported server-side LLM types
3
3
  */
4
- type LlmType = "openai" | "gemini";
4
+ type LlmName = "openai" | "gemini";
5
5
  /**
6
6
  * Configuration required to initiate a connection with the stream server:
7
7
  *
8
- * @sessionEndpoint : REST base URL to the constellation API for managing sessions
9
- * @streamingEndpoint : WebSocket endpoint to the constellation server
10
- * @key : Simfinity API secret key granting access to the server API
11
- * @llm : which LLM service to connect to
12
- * @model : depends on the LLM service. This is the model name as define by the LLM service
13
- *
14
8
  * @example
15
9
  * ```TypeScript
16
10
  * {
17
11
  * sessionEndpoint: "https://simfinity.constellation.com",
18
12
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
13
  * key: "some-secret-key"
20
- * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17",
22
14
  * }
23
15
  * ```
24
16
  */
25
17
  interface WebClientConfig {
18
+ /**
19
+ * HTTP REST base URL to the constellation API for managing sessions.
20
+ */
26
21
  sessionEndpoint: string;
22
+ /**
23
+ * WebSocket endpoint to the constellation server.
24
+ */
27
25
  streamingEndpoint: string;
26
+ /**
27
+ * Simfinity API secret key granting access to the server API.
28
+ */
28
29
  key: string;
29
- llm: LlmType;
30
30
  }
31
31
  /**
32
- * System settings influencing the model behavior:
33
- * @temperature: LLM creativity factor in 0-1 range
34
- * @instructions: system instructions giving context, rules and directions to guide the LLM behavior
35
- * @maxResponseToken: 1-4096 value, maximum number of token used for a single response. Undefined means unlimited.
32
+ * System settings influencing how the model thinks and responds.
36
33
  */
37
34
  interface SessionConfig {
35
+ /**
36
+ * LLM creativity factor in 0-1 range
37
+ */
38
38
  temperature?: number;
39
+ /**
40
+ * System instructions giving context, rules and directions to drive the LLM behavior
41
+ */
39
42
  instructions?: string;
43
+ /**
44
+ * 1-4096 value, maximum number of tokens used for a single response. Undefined or 0 means unlimited.
45
+ */
40
46
  maxResponseToken?: number;
41
47
  }
48
+ /**
49
+ * Options when starting a new session. Some properties are immutable once
50
+ * the session has started, however the SessionConfig settings may, depending on
51
+ * the LLM provider chosen, be modified mid-session.
52
+ */
53
+ interface SessionStartParameters {
54
+ /**
55
+ * Name of one of the supported LLMs.
56
+ */
57
+ llmProvider: LlmName;
58
+ /**
59
+ * When voiceEnabled is set to true, the LLM session is configured to
60
+ * receive and send audio streams and produce a transcript of the audio conversation.
61
+ */
62
+ voiceEnabled: boolean;
63
+ /**
64
+ * Optional, the name of a voice supported by the LLM.
65
+ * Default values are 'Alloy' for OpenAI and 'Aoede' for Gemini.
66
+ * WARNING: constellation will accept any value; it is up to the client to choose
67
+ * a valid voice name for the LLM provider chosen.
68
+ */
69
+ voiceName?: string;
70
+ /**
71
+ * Behavioral options affecting how the model thinks and responds.
72
+ * These options may be modified mid-session with configureSession(), but only
73
+ * if the chosen LLM provider allows it!
74
+ * WARNING: some LLMs like Gemini may not support mid-session updates, thus it is
75
+ * advised and preferred to provide them here at startSession time.
76
+ */
77
+ behaviour?: SessionConfig;
78
+ }
42
79
  /**
43
80
  * Snapshot of the current settings run by the model.
44
81
  */
@@ -155,13 +192,12 @@ declare class WebClient {
155
192
  * @remarks
156
193
  * A session MUST exist first in order to connect the stream next.
157
194
  *
158
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
159
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
195
+ * @param params the configuration options to start this new session
160
196
  *
161
197
  * @exception
162
198
  * This method throws new Error(...) if unable to execute successfully for any reason.
163
199
  */
164
- startSession(voiceEnabled: boolean, voiceName?: string): Promise<void>;
200
+ startSession(params: SessionStartParameters): Promise<void>;
165
201
  /**
166
202
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
167
203
  * If there is no active session, this method does nothing.
@@ -270,4 +306,4 @@ declare class WebClient {
270
306
  private send;
271
307
  }
272
308
 
273
- export { type EventHandlers, type LlmType, type SessionConfig, WebClient, type WebClientConfig };
309
+ export { type EventHandlers, type LlmName, type SessionConfig, type SessionSettings, type SessionStartParameters, WebClient, type WebClientConfig };
package/dist/index.d.ts CHANGED
@@ -1,44 +1,81 @@
1
1
  /**
2
- * Available server-side LLM types
2
+ * Supported server-side LLM types
3
3
  */
4
- type LlmType = "openai" | "gemini";
4
+ type LlmName = "openai" | "gemini";
5
5
  /**
6
6
  * Configuration required to initiate a connection with the stream server:
7
7
  *
8
- * @sessionEndpoint : REST base URL to the constellation API for managing sessions
9
- * @streamingEndpoint : WebSocket endpoint to the constellation server
10
- * @key : Simfinity API secret key granting access to the server API
11
- * @llm : which LLM service to connect to
12
- * @model : depends on the LLM service. This is the model name as define by the LLM service
13
- *
14
8
  * @example
15
9
  * ```TypeScript
16
10
  * {
17
11
  * sessionEndpoint: "https://simfinity.constellation.com",
18
12
  * streamingEndpoint: "wss://simfinity.constellation.com:30003",
19
13
  * key: "some-secret-key"
20
- * llm: "openai",
21
- * model: "gpt-4o-realtime-preview-2024-12-17",
22
14
  * }
23
15
  * ```
24
16
  */
25
17
  interface WebClientConfig {
18
+ /**
19
+ * HTTP REST base URL to the constellation API for managing sessions.
20
+ */
26
21
  sessionEndpoint: string;
22
+ /**
23
+ * WebSocket endpoint to the constellation server.
24
+ */
27
25
  streamingEndpoint: string;
26
+ /**
27
+ * Simfinity API secret key granting access to the server API.
28
+ */
28
29
  key: string;
29
- llm: LlmType;
30
30
  }
31
31
  /**
32
- * System settings influencing the model behavior:
33
- * @temperature: LLM creativity factor in 0-1 range
34
- * @instructions: system instructions giving context, rules and directions to guide the LLM behavior
35
- * @maxResponseToken: 1-4096 value, maximum number of token used for a single response. Undefined means unlimited.
32
+ * System settings influencing how the model thinks and responds.
36
33
  */
37
34
  interface SessionConfig {
35
+ /**
36
+ * LLM creativity factor in 0-1 range
37
+ */
38
38
  temperature?: number;
39
+ /**
40
+ * System instructions giving context, rules and directions to drive the LLM behavior
41
+ */
39
42
  instructions?: string;
43
+ /**
44
+ * 1-4096 value, maximum number of tokens used for a single response. Undefined or 0 means unlimited.
45
+ */
40
46
  maxResponseToken?: number;
41
47
  }
48
+ /**
49
+ * Options when starting a new session. Some properties are immutable once
50
+ * the session has started, however the SessionConfig settings may, depending on
51
+ * the LLM provider chosen, be modified mid-session.
52
+ */
53
+ interface SessionStartParameters {
54
+ /**
55
+ * Name of one of the supported LLMs.
56
+ */
57
+ llmProvider: LlmName;
58
+ /**
59
+ * When voiceEnabled is set to true, the LLM session is configured to
60
+ * receive and send audio streams and produce a transcript of the audio conversation.
61
+ */
62
+ voiceEnabled: boolean;
63
+ /**
64
+ * Optional, the name of a voice supported by the LLM.
65
+ * Default values are 'Alloy' for OpenAI and 'Aoede' for Gemini.
66
+ * WARNING: constellation will accept any value; it is up to the client to choose
67
+ * a valid voice name for the LLM provider chosen.
68
+ */
69
+ voiceName?: string;
70
+ /**
71
+ * Behavioral options affecting how the model thinks and responds.
72
+ * These options may be modified mid-session with configureSession(), but only
73
+ * if the chosen LLM provider allows it!
74
+ * WARNING: some LLMs like Gemini may not support mid-session updates, thus it is
75
+ * advised and preferred to provide them here at startSession time.
76
+ */
77
+ behaviour?: SessionConfig;
78
+ }
42
79
  /**
43
80
  * Snapshot of the current settings run by the model.
44
81
  */
@@ -155,13 +192,12 @@ declare class WebClient {
155
192
  * @remarks
156
193
  * A session MUST exist first in order to connect the stream next.
157
194
  *
158
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
159
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
195
+ * @param params the configuration options to start this new session
160
196
  *
161
197
  * @exception
162
198
  * This method throws new Error(...) if unable to execute successfully for any reason.
163
199
  */
164
- startSession(voiceEnabled: boolean, voiceName?: string): Promise<void>;
200
+ startSession(params: SessionStartParameters): Promise<void>;
165
201
  /**
166
202
  * Close an opened, persistent chat room, effectively killing the streaming as well if still opened.
167
203
  * If there is no active session, this method does nothing.
@@ -270,4 +306,4 @@ declare class WebClient {
270
306
  private send;
271
307
  }
272
308
 
273
- export { type EventHandlers, type LlmType, type SessionConfig, WebClient, type WebClientConfig };
309
+ export { type EventHandlers, type LlmName, type SessionConfig, type SessionSettings, type SessionStartParameters, WebClient, type WebClientConfig };
package/dist/index.js CHANGED
@@ -13,17 +13,20 @@ var WebClient = class {
13
13
  * @remarks
14
14
  * A session MUST exist first in order to connect the stream next.
15
15
  *
16
- * @param voiceEnabled whether this Model-session can receive & produce audio as well as text
17
- * @param voiceName LLM specific voice name e.g. with OpenAI this could be 'alloy'
16
+ * @param params the configuration options to start this new session
18
17
  *
19
18
  * @exception
20
19
  * This method throws new Error(...) if unable to execute successfully for any reason.
21
20
  */
22
- async startSession(voiceEnabled, voiceName) {
21
+ async startSession(params) {
22
+ var _a, _b, _c;
23
23
  const prepareBody = {
24
- llmProvider: this.config.llm,
25
- audioEnabled: voiceEnabled,
26
- voiceName
24
+ llmProvider: params.llmProvider,
25
+ audioEnabled: params.voiceEnabled,
26
+ voiceName: params.voiceName,
27
+ temperature: (_a = params.behaviour) == null ? void 0 : _a.temperature,
28
+ instructions: (_b = params.behaviour) == null ? void 0 : _b.instructions,
29
+ maxResponseToken: (_c = params.behaviour) == null ? void 0 : _c.maxResponseToken
27
30
  };
28
31
  const response = await fetch(`${this.config.sessionEndpoint}/prepare_session`, {
29
32
  method: "POST",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@simfinity/constellation-client",
3
- "version": "1.0.20",
3
+ "version": "1.0.22",
4
4
  "type": "module",
5
5
  "exports": {
6
6
  ".": {