@aituber-onair/chat 0.29.0 → 0.30.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.ja.md +4 -4
  2. package/README.md +4 -4
  3. package/dist/cjs/constants/xai.d.ts +1 -0
  4. package/dist/cjs/constants/xai.d.ts.map +1 -1
  5. package/dist/cjs/constants/xai.js +3 -1
  6. package/dist/cjs/constants/xai.js.map +1 -1
  7. package/dist/cjs/services/ChatService.d.ts +2 -0
  8. package/dist/cjs/services/ChatService.d.ts.map +1 -1
  9. package/dist/cjs/services/providers/openai/OpenAIChatService.d.ts +0 -14
  10. package/dist/cjs/services/providers/openai/OpenAIChatService.d.ts.map +1 -1
  11. package/dist/cjs/services/providers/openai/OpenAIChatService.js +5 -192
  12. package/dist/cjs/services/providers/openai/OpenAIChatService.js.map +1 -1
  13. package/dist/cjs/services/providers/openai/responsesParser.d.ts +10 -0
  14. package/dist/cjs/services/providers/openai/responsesParser.d.ts.map +1 -0
  15. package/dist/cjs/services/providers/openai/responsesParser.js +166 -0
  16. package/dist/cjs/services/providers/openai/responsesParser.js.map +1 -0
  17. package/dist/cjs/services/providers/xai/XAIChatServiceProvider.d.ts.map +1 -1
  18. package/dist/cjs/services/providers/xai/XAIChatServiceProvider.js +1 -0
  19. package/dist/cjs/services/providers/xai/XAIChatServiceProvider.js.map +1 -1
  20. package/dist/esm/constants/xai.d.ts +1 -0
  21. package/dist/esm/constants/xai.d.ts.map +1 -1
  22. package/dist/esm/constants/xai.js +2 -0
  23. package/dist/esm/constants/xai.js.map +1 -1
  24. package/dist/esm/services/ChatService.d.ts +2 -0
  25. package/dist/esm/services/ChatService.d.ts.map +1 -1
  26. package/dist/esm/services/providers/openai/OpenAIChatService.d.ts +0 -14
  27. package/dist/esm/services/providers/openai/OpenAIChatService.d.ts.map +1 -1
  28. package/dist/esm/services/providers/openai/OpenAIChatService.js +5 -192
  29. package/dist/esm/services/providers/openai/OpenAIChatService.js.map +1 -1
  30. package/dist/esm/services/providers/openai/responsesParser.d.ts +10 -0
  31. package/dist/esm/services/providers/openai/responsesParser.d.ts.map +1 -0
  32. package/dist/esm/services/providers/openai/responsesParser.js +162 -0
  33. package/dist/esm/services/providers/openai/responsesParser.js.map +1 -0
  34. package/dist/esm/services/providers/xai/XAIChatServiceProvider.d.ts.map +1 -1
  35. package/dist/esm/services/providers/xai/XAIChatServiceProvider.js +2 -1
  36. package/dist/esm/services/providers/xai/XAIChatServiceProvider.js.map +1 -1
  37. package/dist/umd/aituber-onair-chat.js +165 -181
  38. package/dist/umd/aituber-onair-chat.min.js +8 -8
  39. package/package.json +1 -1
@@ -119,6 +119,7 @@ var AITuberOnAirChat = (() => {
119
119
  MODEL_GROK_4_1_FAST_REASONING: () => MODEL_GROK_4_1_FAST_REASONING,
120
120
  MODEL_GROK_4_20_NON_REASONING: () => MODEL_GROK_4_20_NON_REASONING,
121
121
  MODEL_GROK_4_20_REASONING: () => MODEL_GROK_4_20_REASONING,
122
+ MODEL_GROK_4_3: () => MODEL_GROK_4_3,
122
123
  MODEL_KIMI_K2_5: () => MODEL_KIMI_K2_5,
123
124
  MODEL_KIMI_K2_6: () => MODEL_KIMI_K2_6,
124
125
  MODEL_MOONSHOTAI_KIMI_K2_5: () => MODEL_MOONSHOTAI_KIMI_K2_5,
@@ -417,11 +418,13 @@ var AITuberOnAirChat = (() => {
417
418
 
418
419
  // src/constants/xai.ts
419
420
  var ENDPOINT_XAI_CHAT_COMPLETIONS_API = "https://api.x.ai/v1/chat/completions";
421
+ var MODEL_GROK_4_3 = "grok-4.3";
420
422
  var MODEL_GROK_4_20_REASONING = "grok-4.20-0309-reasoning";
421
423
  var MODEL_GROK_4_20_NON_REASONING = "grok-4.20-0309-non-reasoning";
422
424
  var MODEL_GROK_4_1_FAST_REASONING = "grok-4-1-fast-reasoning";
423
425
  var MODEL_GROK_4_1_FAST_NON_REASONING = "grok-4-1-fast-non-reasoning";
424
426
  var XAI_VISION_SUPPORTED_MODELS = [
427
+ MODEL_GROK_4_3,
425
428
  MODEL_GROK_4_20_REASONING,
426
429
  MODEL_GROK_4_20_NON_REASONING,
427
430
  MODEL_GROK_4_1_FAST_REASONING,
@@ -2855,6 +2858,164 @@ If it's in another language, summarize in that language.
2855
2858
  }
2856
2859
  };
2857
2860
 
2861
+ // src/services/providers/openai/responsesParser.ts
2862
+ async function parseOpenAIResponsesStream(res, onPartial) {
2863
+ const reader = res.body.getReader();
2864
+ const dec = new TextDecoder();
2865
+ const textBlocks = [];
2866
+ const toolCallsMap = /* @__PURE__ */ new Map();
2867
+ let responseStatus;
2868
+ let incompleteDetails;
2869
+ let usage;
2870
+ let buf = "";
2871
+ while (true) {
2872
+ const { done, value } = await reader.read();
2873
+ if (done) break;
2874
+ buf += dec.decode(value, { stream: true });
2875
+ let eventType = "";
2876
+ let eventData = "";
2877
+ const lines = buf.split("\n");
2878
+ buf = lines.pop() || "";
2879
+ for (let i = 0; i < lines.length; i++) {
2880
+ const line = lines[i].trim();
2881
+ if (line.startsWith("event:")) {
2882
+ eventType = line.slice(6).trim();
2883
+ } else if (line.startsWith("data:")) {
2884
+ eventData = line.slice(5).trim();
2885
+ } else if (line === "" && eventType && eventData) {
2886
+ try {
2887
+ const json = JSON.parse(eventData);
2888
+ handleResponsesSSEEvent(
2889
+ eventType,
2890
+ json,
2891
+ onPartial,
2892
+ textBlocks,
2893
+ toolCallsMap,
2894
+ (metadata) => {
2895
+ if (metadata.responseStatus !== void 0) {
2896
+ responseStatus = metadata.responseStatus;
2897
+ }
2898
+ if (metadata.incompleteDetails !== void 0) {
2899
+ incompleteDetails = metadata.incompleteDetails;
2900
+ }
2901
+ if (metadata.usage !== void 0) {
2902
+ usage = metadata.usage;
2903
+ }
2904
+ }
2905
+ );
2906
+ } catch {
2907
+ console.warn("Failed to parse SSE data:", eventData);
2908
+ }
2909
+ eventType = "";
2910
+ eventData = "";
2911
+ }
2912
+ }
2913
+ }
2914
+ const toolBlocks = Array.from(toolCallsMap.values()).map(
2915
+ (tool) => ({
2916
+ type: "tool_use",
2917
+ id: tool.id,
2918
+ name: tool.name,
2919
+ input: tool.input || {}
2920
+ })
2921
+ );
2922
+ return {
2923
+ blocks: [...textBlocks, ...toolBlocks],
2924
+ stop_reason: toolBlocks.length ? "tool_use" : "end",
2925
+ truncated: responseStatus === "incomplete",
2926
+ response_status: responseStatus,
2927
+ incomplete_details: incompleteDetails,
2928
+ usage
2929
+ };
2930
+ }
2931
+ function handleResponsesSSEEvent(eventType, data, onPartial, textBlocks, toolCallsMap, onMetadata) {
2932
+ switch (eventType) {
2933
+ case "response.output_item.added":
2934
+ if (data.item?.type === "message" && Array.isArray(data.item.content)) {
2935
+ data.item.content.forEach((c) => {
2936
+ if (c.type === "output_text" && c.text) {
2937
+ onPartial(c.text);
2938
+ StreamTextAccumulator.append(textBlocks, c.text);
2939
+ }
2940
+ });
2941
+ } else if (data.item?.type === "function_call") {
2942
+ toolCallsMap.set(data.item.id, {
2943
+ id: data.item.id,
2944
+ name: data.item.name,
2945
+ input: data.item.arguments ? JSON.parse(data.item.arguments) : {}
2946
+ });
2947
+ }
2948
+ break;
2949
+ case "response.content_part.added":
2950
+ if (data.part?.type === "output_text" && typeof data.part.text === "string") {
2951
+ onPartial(data.part.text);
2952
+ StreamTextAccumulator.append(textBlocks, data.part.text);
2953
+ }
2954
+ break;
2955
+ case "response.output_text.delta":
2956
+ case "response.content_part.delta": {
2957
+ const deltaText = typeof data.delta === "string" ? data.delta : data.delta?.text ?? "";
2958
+ if (deltaText) {
2959
+ onPartial(deltaText);
2960
+ StreamTextAccumulator.append(textBlocks, deltaText);
2961
+ }
2962
+ break;
2963
+ }
2964
+ case "response.output_text.done":
2965
+ case "response.content_part.done":
2966
+ case "response.reasoning.started":
2967
+ case "response.reasoning.delta":
2968
+ case "response.reasoning.done":
2969
+ break;
2970
+ case "response.completed":
2971
+ onMetadata(extractResponsesMetadata(data, "completed"));
2972
+ break;
2973
+ case "response.incomplete":
2974
+ onMetadata(extractResponsesMetadata(data, "incomplete"));
2975
+ break;
2976
+ default:
2977
+ break;
2978
+ }
2979
+ }
2980
+ function extractResponsesMetadata(data, fallbackStatus) {
2981
+ const response = data?.response ?? data;
2982
+ return {
2983
+ responseStatus: response?.status ?? fallbackStatus,
2984
+ incompleteDetails: response?.incomplete_details ?? null,
2985
+ usage: response?.usage
2986
+ };
2987
+ }
2988
+ function parseOpenAIResponsesOneShot(data) {
2989
+ const blocks = [];
2990
+ if (data.output && Array.isArray(data.output)) {
2991
+ data.output.forEach((outputItem) => {
2992
+ if (outputItem.type === "message" && outputItem.content) {
2993
+ outputItem.content.forEach((content) => {
2994
+ if (content.type === "output_text" && content.text) {
2995
+ blocks.push({ type: "text", text: content.text });
2996
+ }
2997
+ });
2998
+ }
2999
+ if (outputItem.type === "function_call") {
3000
+ blocks.push({
3001
+ type: "tool_use",
3002
+ id: outputItem.id,
3003
+ name: outputItem.name,
3004
+ input: outputItem.arguments ? JSON.parse(outputItem.arguments) : {}
3005
+ });
3006
+ }
3007
+ });
3008
+ }
3009
+ return {
3010
+ blocks,
3011
+ stop_reason: blocks.some((b) => b.type === "tool_use") ? "tool_use" : "end",
3012
+ truncated: data?.status === "incomplete",
3013
+ response_status: data?.status,
3014
+ incomplete_details: data?.incomplete_details ?? null,
3015
+ usage: data?.usage
3016
+ };
3017
+ }
3018
+
2858
3019
  // src/services/providers/openai/OpenAIChatService.ts
2859
3020
  var GPT5_RESPONSE_LENGTH_MIN_TOKENS = {
2860
3021
  [CHAT_RESPONSE_LENGTH.VERY_SHORT]: 800,
@@ -2925,7 +3086,7 @@ If it's in another language, summarize in that language.
2925
3086
  const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
2926
3087
  try {
2927
3088
  if (isResponsesAPI) {
2928
- const result = await this.parseResponsesStream(
3089
+ const result = await parseOpenAIResponsesStream(
2929
3090
  res,
2930
3091
  onPartialResponse
2931
3092
  );
@@ -2958,7 +3119,7 @@ If it's in another language, summarize in that language.
2958
3119
  const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
2959
3120
  try {
2960
3121
  if (isResponsesAPI) {
2961
- const result = await this.parseResponsesStream(
3122
+ const result = await parseOpenAIResponsesStream(
2962
3123
  res,
2963
3124
  onPartialResponse
2964
3125
  );
@@ -3019,7 +3180,7 @@ If it's in another language, summarize in that language.
3019
3180
  async parseResponse(res, stream, onPartialResponse) {
3020
3181
  const isResponsesAPI = this.endpoint === ENDPOINT_OPENAI_RESPONSES_API;
3021
3182
  if (isResponsesAPI) {
3022
- return stream ? this.parseResponsesStream(res, onPartialResponse) : this.parseResponsesOneShot(await res.json());
3183
+ return stream ? parseOpenAIResponsesStream(res, onPartialResponse) : parseOpenAIResponsesOneShot(await res.json());
3023
3184
  }
3024
3185
  return stream ? this.parseStream(res, onPartialResponse) : this.parseOneShot(await res.json());
3025
3186
  }
@@ -3213,184 +3374,6 @@ If it's in another language, summarize in that language.
3213
3374
  parseOneShot(data) {
3214
3375
  return parseOpenAICompatibleOneShot(data);
3215
3376
  }
3216
- /**
3217
- * Parse streaming response from Responses API (SSE format)
3218
- */
3219
- async parseResponsesStream(res, onPartial) {
3220
- const reader = res.body.getReader();
3221
- const dec = new TextDecoder();
3222
- const textBlocks = [];
3223
- const toolCallsMap = /* @__PURE__ */ new Map();
3224
- let responseStatus;
3225
- let incompleteDetails;
3226
- let usage;
3227
- let buf = "";
3228
- while (true) {
3229
- const { done, value } = await reader.read();
3230
- if (done) break;
3231
- buf += dec.decode(value, { stream: true });
3232
- let eventType = "";
3233
- let eventData = "";
3234
- const lines = buf.split("\n");
3235
- buf = lines.pop() || "";
3236
- for (let i = 0; i < lines.length; i++) {
3237
- const line = lines[i].trim();
3238
- if (line.startsWith("event:")) {
3239
- eventType = line.slice(6).trim();
3240
- } else if (line.startsWith("data:")) {
3241
- eventData = line.slice(5).trim();
3242
- } else if (line === "" && eventType && eventData) {
3243
- try {
3244
- const json = JSON.parse(eventData);
3245
- const completionResult = this.handleResponsesSSEEvent(
3246
- eventType,
3247
- json,
3248
- onPartial,
3249
- textBlocks,
3250
- toolCallsMap,
3251
- (metadata) => {
3252
- if (metadata.responseStatus !== void 0) {
3253
- responseStatus = metadata.responseStatus;
3254
- }
3255
- if (metadata.incompleteDetails !== void 0) {
3256
- incompleteDetails = metadata.incompleteDetails;
3257
- }
3258
- if (metadata.usage !== void 0) {
3259
- usage = metadata.usage;
3260
- }
3261
- }
3262
- );
3263
- if (completionResult === "completed") {
3264
- }
3265
- } catch (e) {
3266
- console.warn("Failed to parse SSE data:", eventData);
3267
- }
3268
- eventType = "";
3269
- eventData = "";
3270
- }
3271
- }
3272
- }
3273
- const toolBlocks = Array.from(toolCallsMap.values()).map(
3274
- (tool) => ({
3275
- type: "tool_use",
3276
- id: tool.id,
3277
- name: tool.name,
3278
- input: tool.input || {}
3279
- })
3280
- );
3281
- const blocks = [...textBlocks, ...toolBlocks];
3282
- return {
3283
- blocks,
3284
- stop_reason: toolBlocks.length ? "tool_use" : "end",
3285
- truncated: responseStatus === "incomplete",
3286
- response_status: responseStatus,
3287
- incomplete_details: incompleteDetails,
3288
- usage
3289
- };
3290
- }
3291
- /**
3292
- * Handle specific SSE events from Responses API
3293
- * @returns 'completed' if the response is completed, undefined otherwise
3294
- */
3295
- handleResponsesSSEEvent(eventType, data, onPartial, textBlocks, toolCallsMap, onMetadata) {
3296
- switch (eventType) {
3297
- // Item addition events
3298
- case "response.output_item.added":
3299
- if (data.item?.type === "message" && Array.isArray(data.item.content)) {
3300
- data.item.content.forEach((c) => {
3301
- if (c.type === "output_text" && c.text) {
3302
- onPartial(c.text);
3303
- StreamTextAccumulator.append(textBlocks, c.text);
3304
- }
3305
- });
3306
- } else if (data.item?.type === "function_call") {
3307
- toolCallsMap.set(data.item.id, {
3308
- id: data.item.id,
3309
- name: data.item.name,
3310
- input: data.item.arguments ? JSON.parse(data.item.arguments) : {}
3311
- });
3312
- }
3313
- break;
3314
- // Initial content part events
3315
- case "response.content_part.added":
3316
- if (data.part?.type === "output_text" && typeof data.part.text === "string") {
3317
- onPartial(data.part.text);
3318
- StreamTextAccumulator.append(textBlocks, data.part.text);
3319
- }
3320
- break;
3321
- // Text delta events
3322
- case "response.output_text.delta":
3323
- case "response.content_part.delta":
3324
- {
3325
- const deltaText = typeof data.delta === "string" ? data.delta : data.delta?.text ?? "";
3326
- if (deltaText) {
3327
- onPartial(deltaText);
3328
- StreamTextAccumulator.append(textBlocks, deltaText);
3329
- }
3330
- }
3331
- break;
3332
- // Text completion events - do not add text here as it's already accumulated via delta events
3333
- case "response.output_text.done":
3334
- case "response.content_part.done":
3335
- break;
3336
- // Response completion events
3337
- case "response.completed":
3338
- onMetadata(this.extractResponsesMetadata(data, "completed"));
3339
- return "completed";
3340
- case "response.incomplete":
3341
- onMetadata(this.extractResponsesMetadata(data, "incomplete"));
3342
- return "completed";
3343
- // GPT-5 reasoning token events (not visible but counted for billing)
3344
- case "response.reasoning.started":
3345
- case "response.reasoning.delta":
3346
- case "response.reasoning.done":
3347
- break;
3348
- default:
3349
- break;
3350
- }
3351
- return void 0;
3352
- }
3353
- extractResponsesMetadata(data, fallbackStatus) {
3354
- const response = data?.response ?? data;
3355
- return {
3356
- responseStatus: response?.status ?? fallbackStatus,
3357
- incompleteDetails: response?.incomplete_details ?? null,
3358
- usage: response?.usage
3359
- };
3360
- }
3361
- /**
3362
- * Parse non-streaming response from Responses API
3363
- */
3364
- parseResponsesOneShot(data) {
3365
- const blocks = [];
3366
- if (data.output && Array.isArray(data.output)) {
3367
- data.output.forEach((outputItem) => {
3368
- if (outputItem.type === "message" && outputItem.content) {
3369
- outputItem.content.forEach((content) => {
3370
- if (content.type === "output_text" && content.text) {
3371
- blocks.push({ type: "text", text: content.text });
3372
- }
3373
- });
3374
- }
3375
- if (outputItem.type === "function_call") {
3376
- blocks.push({
3377
- type: "tool_use",
3378
- id: outputItem.id,
3379
- name: outputItem.name,
3380
- input: outputItem.arguments ? JSON.parse(outputItem.arguments) : {}
3381
- });
3382
- }
3383
- });
3384
- }
3385
- return {
3386
- blocks,
3387
- stop_reason: blocks.some((b) => b.type === "tool_use") ? "tool_use" : "end",
3388
- truncated: data?.status === "incomplete",
3389
- response_status: data?.status,
3390
- incomplete_details: data?.incomplete_details ?? null,
3391
- usage: data?.usage
3392
- };
3393
- }
3394
3377
  };
3395
3378
 
3396
3379
  // src/services/providers/openaiCompatible/OpenAICompatibleChatServiceProvider.ts
@@ -4179,6 +4162,7 @@ If it's in another language, summarize in that language.
4179
4162
  */
4180
4163
  getSupportedModels() {
4181
4164
  return [
4165
+ MODEL_GROK_4_3,
4182
4166
  MODEL_GROK_4_20_REASONING,
4183
4167
  MODEL_GROK_4_20_NON_REASONING,
4184
4168
  MODEL_GROK_4_1_FAST_REASONING,