@agentica/core 0.32.9 → 0.33.0

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/lib/events/AgenticaCallEvent.d.ts +0 -4
  2. package/lib/events/AgenticaExecuteEvent.d.ts +1 -1
  3. package/lib/events/AgenticaJsonParseErrorEvent.d.ts +4 -0
  4. package/lib/events/AgenticaResponseEvent.d.ts +1 -0
  5. package/lib/events/AgenticaValidateEvent.d.ts +1 -1
  6. package/lib/factory/events.d.ts +4 -2
  7. package/lib/factory/events.js +11 -3
  8. package/lib/factory/events.js.map +1 -1
  9. package/lib/index.mjs +100 -85
  10. package/lib/index.mjs.map +1 -1
  11. package/lib/json/IAgenticaEventJson.d.ts +9 -5
  12. package/lib/orchestrate/call.js +9 -5
  13. package/lib/orchestrate/call.js.map +1 -1
  14. package/lib/orchestrate/cancel.js +1 -1
  15. package/lib/orchestrate/cancel.js.map +1 -1
  16. package/lib/orchestrate/describe.js +1 -1
  17. package/lib/orchestrate/describe.js.map +1 -1
  18. package/lib/orchestrate/initialize.js +1 -1
  19. package/lib/orchestrate/initialize.js.map +1 -1
  20. package/lib/orchestrate/internal/cancelFunctionFromContext.js +1 -1
  21. package/lib/orchestrate/internal/cancelFunctionFromContext.js.map +1 -1
  22. package/lib/orchestrate/internal/selectFunctionFromContext.js +1 -1
  23. package/lib/orchestrate/internal/selectFunctionFromContext.js.map +1 -1
  24. package/lib/orchestrate/select.js +1 -1
  25. package/lib/orchestrate/select.js.map +1 -1
  26. package/lib/utils/ChatGptCompletionMessageUtil.d.ts +2 -2
  27. package/lib/utils/ChatGptCompletionMessageUtil.js +3 -3
  28. package/lib/utils/ChatGptCompletionMessageUtil.js.map +1 -1
  29. package/lib/utils/ChatGptCompletionMessageUtil.spec.js +4 -4
  30. package/lib/utils/ChatGptCompletionMessageUtil.spec.js.map +1 -1
  31. package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -1
  32. package/lib/utils/ChatGptCompletionStreamingUtil.spec.js +2 -1
  33. package/lib/utils/ChatGptCompletionStreamingUtil.spec.js.map +1 -1
  34. package/lib/utils/JsonUtil.js +1 -1
  35. package/lib/utils/JsonUtil.js.map +1 -1
  36. package/lib/utils/JsonUtil.spec.js +38 -38
  37. package/lib/utils/JsonUtil.spec.js.map +1 -1
  38. package/lib/utils/StreamUtil.spec.js.map +1 -1
  39. package/lib/utils/request.d.ts +7 -7
  40. package/lib/utils/request.js +67 -65
  41. package/lib/utils/request.js.map +1 -1
  42. package/package.json +7 -7
  43. package/src/events/AgenticaCallEvent.ts +0 -5
  44. package/src/events/AgenticaExecuteEvent.ts +1 -1
  45. package/src/events/AgenticaJsonParseErrorEvent.ts +4 -0
  46. package/src/events/AgenticaResponseEvent.ts +2 -0
  47. package/src/events/AgenticaValidateEvent.ts +1 -1
  48. package/src/factory/events.ts +15 -5
  49. package/src/json/IAgenticaEventJson.ts +11 -6
  50. package/src/orchestrate/call.ts +14 -7
  51. package/src/orchestrate/cancel.ts +1 -1
  52. package/src/orchestrate/describe.ts +1 -1
  53. package/src/orchestrate/initialize.ts +1 -1
  54. package/src/orchestrate/internal/cancelFunctionFromContext.ts +1 -1
  55. package/src/orchestrate/internal/selectFunctionFromContext.ts +1 -1
  56. package/src/orchestrate/select.ts +1 -1
  57. package/src/utils/ChatGptCompletionMessageUtil.spec.ts +4 -4
  58. package/src/utils/ChatGptCompletionMessageUtil.ts +5 -4
  59. package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +437 -435
  60. package/src/utils/ChatGptCompletionStreamingUtil.ts +5 -4
  61. package/src/utils/JsonUtil.spec.ts +58 -60
  62. package/src/utils/JsonUtil.ts +1 -3
  63. package/src/utils/StreamUtil.spec.ts +1 -1
  64. package/src/utils/StreamUtil.ts +2 -2
  65. package/src/utils/request.ts +92 -85
@@ -1,5 +1,7 @@
- import { describe, it, expect, vi, beforeEach } from "vitest";
  import type { ChatCompletionChunk } from "openai/resources";
+
+ import { beforeEach, describe, expect, it, vi } from "vitest";
+
  import { reduceStreamingWithDispatch } from "./ChatGptCompletionStreamingUtil";
  import { StreamUtil } from "./StreamUtil";
 
@@ -95,7 +97,7 @@ describe("reduceStreamingWithDispatch", () => {
  expect(result).toBeDefined();
  expect(result.object).toBe("chat.completion");
  expect(eventProcessor).toHaveBeenCalledTimes(1);
-
+
  const eventCall = eventProcessor.mock.calls[0]?.[0];
  expect(eventCall.get()).toBe("Hello World!");
  });
@@ -142,7 +144,7 @@ describe("reduceStreamingWithDispatch", () => {
 
  expect(result).toBeDefined();
  expect(eventProcessor).toHaveBeenCalledTimes(1);
-
+
  const eventCall = eventProcessor.mock.calls[0]?.[0];
  expect(eventCall.get()).toBe("Hello");
  });
@@ -201,7 +203,7 @@ describe("reduceStreamingWithDispatch", () => {
 
  expect(result).toBeDefined();
  expect(eventProcessor).toHaveBeenCalledTimes(2);
-
+
  const firstCall = eventProcessor.mock.calls[0]?.[0];
  const secondCall = eventProcessor.mock.calls[1]?.[0];
  expect(firstCall.get()).toBe("Choice 1 continued");
@@ -252,7 +254,7 @@ describe("reduceStreamingWithDispatch", () => {
 
  expect(result).toBeDefined();
  expect(eventProcessor).toHaveBeenCalledTimes(1);
-
+
  const eventCall = eventProcessor.mock.calls[0]?.[0];
  expect(eventCall.get()).toBe("Hello World");
  expect(eventCall.done()).toBe(true);
@@ -372,7 +374,7 @@ describe("reduceStreamingWithDispatch", () => {
  const eventProcessor = vi.fn();
 
  await expect(reduceStreamingWithDispatch(stream, eventProcessor)).rejects.toThrow(
- "StreamUtil.reduce did not produce a ChatCompletion"
+ "StreamUtil.reduce did not produce a ChatCompletion",
  );
  });
 
@@ -477,432 +479,432 @@ describe("reduceStreamingWithDispatch", () => {
 
  expect(result).toBeDefined();
  expect(eventProcessor).toHaveBeenCalledTimes(1);
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe("Hello World");
- });
- });
-
- describe("edge cases and exceptions", () => {
- it("should handle null delta content", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: null },
- finish_reason: null,
- },
- ],
- },
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: "Hello" },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(1);
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe("Hello");
- });
-
- it("should handle missing delta object", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: {},
- finish_reason: null,
- },
- ],
- },
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: "Hello" },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(1);
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe("Hello");
- });
-
- it("should handle chunks with no choices", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [],
- },
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: "Hello" },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(1);
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe("Hello");
- });
-
- it("should handle very large content chunks", async () => {
- const largeContent = "x".repeat(10000);
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: largeContent },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- // Now single chunk with content should trigger eventProcessor
- expect(eventProcessor).toHaveBeenCalledOnce();
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe(largeContent);
- });
-
- it("should handle rapid consecutive chunks", async () => {
- const chunks: ChatCompletionChunk[] = Array.from({ length: 100 }, (_, i) => ({
- id: "test-id",
- object: "chat.completion.chunk" as const,
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: i.toString() },
- finish_reason: i === 99 ? "stop" as const : null,
- },
- ],
- }));
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(1);
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- const expectedContent = Array.from({ length: 100 }, (_, i) => i.toString()).join("");
- expect(eventCall.get()).toBe(expectedContent);
- });
-
- it("should handle out-of-order choice indices", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 2,
- delta: { content: "Third" },
- finish_reason: null,
- },
- {
- index: 0,
- delta: { content: "First" },
- finish_reason: null,
- },
- {
- index: 1,
- delta: { content: "Second" },
- finish_reason: null,
- },
- ],
- },
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: " content" },
- finish_reason: "stop",
- },
- {
- index: 1,
- delta: { content: " content" },
- finish_reason: "stop",
- },
- {
- index: 2,
- delta: { content: " content" },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- chunks.forEach(chunk => controller.enqueue(chunk));
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(3);
-
- const calls = eventProcessor.mock.calls.map(call => call[0]);
- expect(calls[0].get()).toBe("Third content");
- expect(calls[1].get()).toBe("First content");
- expect(calls[2].get()).toBe("Second content");
- });
-
- it("should handle mixed finish reasons", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: "Hello" },
- finish_reason: null,
- },
- {
- index: 1,
- delta: { content: "World" },
- finish_reason: null,
- },
- ],
- },
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: " there" },
- finish_reason: "stop",
- },
- {
- index: 1,
- delta: { content: "!" },
- finish_reason: "length",
- },
- ],
- },
- ];
-
- const stream = StreamUtil.from(...chunks);
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- expect(eventProcessor).toHaveBeenCalledTimes(2);
-
- const firstCall = eventProcessor.mock.calls[0]?.[0];
- const secondCall = eventProcessor.mock.calls[1]?.[0];
- expect(firstCall.get()).toBe("Hello there");
- expect(secondCall.get()).toBe("World!");
- await firstCall.join();
- await secondCall.join();
- expect(firstCall.done()).toBe(true);
- expect(secondCall.done()).toBe(true);
- });
-
- it("should handle Unicode and special characters", async () => {
- const specialContent = "Hello 🌍! 안녕하세요 مرحبا 🚀 ñáéíóú";
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: specialContent },
- finish_reason: "stop",
- },
- ],
- },
- ];
-
- const stream = StreamUtil.from(...chunks);
-
- const eventProcessor = vi.fn();
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
-
- expect(result).toBeDefined();
- // Now single chunk with content should trigger eventProcessor
- expect(eventProcessor).toHaveBeenCalledOnce();
-
- const eventCall = eventProcessor.mock.calls[0]?.[0];
- expect(eventCall.get()).toBe(specialContent);
- });
-
- it("should handle stream reader errors gracefully", async () => {
- const chunks: ChatCompletionChunk[] = [
- {
- id: "test-id",
- object: "chat.completion.chunk",
- created: 1234567890,
- model: "gpt-3.5-turbo",
- choices: [
- {
- index: 0,
- delta: { content: "Hello" },
- finish_reason: null,
- },
- ],
- },
- ];
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- controller.enqueue(chunks[0]);
- // Simulate an error in the stream
- controller.error(new Error("Stream error"));
- },
- });
-
- const eventProcessor = vi.fn();
-
- await expect(reduceStreamingWithDispatch(stream, eventProcessor))
- .rejects.toThrow("Stream error");
- });
-
- it("should handle completely malformed chunks gracefully", async () => {
- const malformedChunk = {
- // Missing required fields
- object: "chat.completion.chunk",
- choices: [
- {
- // Missing index
- delta: { content: "Hello" },
- finish_reason: null,
- },
- ],
- } as any;
-
- const stream = new ReadableStream<ChatCompletionChunk>({
- start(controller) {
- controller.enqueue(malformedChunk);
- controller.close();
- },
- });
-
- const eventProcessor = vi.fn();
-
- // Should not throw, but should handle gracefully
- const result = await reduceStreamingWithDispatch(stream, eventProcessor);
- expect(result).toBeDefined();
- });
- });
- });
-
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe("Hello World");
+ });
+ });
+
+ describe("edge cases and exceptions", () => {
+ it("should handle null delta content", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: null },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: "Hello" },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe("Hello");
+ });
+
+ it("should handle missing delta object", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: {},
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: "Hello" },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe("Hello");
+ });
+
+ it("should handle chunks with no choices", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [],
+ },
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: "Hello" },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe("Hello");
+ });
+
+ it("should handle very large content chunks", async () => {
+ const largeContent = "x".repeat(10000);
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: largeContent },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ // Now single chunk with content should trigger eventProcessor
+ expect(eventProcessor).toHaveBeenCalledOnce();
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe(largeContent);
+ });
+
+ it("should handle rapid consecutive chunks", async () => {
+ const chunks: ChatCompletionChunk[] = Array.from({ length: 100 }, (_, i) => ({
+ id: "test-id",
+ object: "chat.completion.chunk" as const,
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: i.toString() },
+ finish_reason: i === 99 ? "stop" as const : null,
+ },
+ ],
+ }));
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(1);
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ const expectedContent = Array.from({ length: 100 }, (_, i) => i.toString()).join("");
+ expect(eventCall.get()).toBe(expectedContent);
+ });
+
+ it("should handle out-of-order choice indices", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 2,
+ delta: { content: "Third" },
+ finish_reason: null,
+ },
+ {
+ index: 0,
+ delta: { content: "First" },
+ finish_reason: null,
+ },
+ {
+ index: 1,
+ delta: { content: "Second" },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: " content" },
+ finish_reason: "stop",
+ },
+ {
+ index: 1,
+ delta: { content: " content" },
+ finish_reason: "stop",
+ },
+ {
+ index: 2,
+ delta: { content: " content" },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ chunks.forEach(chunk => controller.enqueue(chunk));
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(3);
+
+ const calls = eventProcessor.mock.calls.map(call => call[0]);
+ expect(calls[0].get()).toBe("Third content");
+ expect(calls[1].get()).toBe("First content");
+ expect(calls[2].get()).toBe("Second content");
+ });
+
+ it("should handle mixed finish reasons", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: "Hello" },
+ finish_reason: null,
+ },
+ {
+ index: 1,
+ delta: { content: "World" },
+ finish_reason: null,
+ },
+ ],
+ },
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: " there" },
+ finish_reason: "stop",
+ },
+ {
+ index: 1,
+ delta: { content: "!" },
+ finish_reason: "length",
+ },
+ ],
+ },
+ ];
+
+ const stream = StreamUtil.from(...chunks);
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ expect(eventProcessor).toHaveBeenCalledTimes(2);
+
+ const firstCall = eventProcessor.mock.calls[0]?.[0];
+ const secondCall = eventProcessor.mock.calls[1]?.[0];
+ expect(firstCall.get()).toBe("Hello there");
+ expect(secondCall.get()).toBe("World!");
+ await firstCall.join();
+ await secondCall.join();
+ expect(firstCall.done()).toBe(true);
+ expect(secondCall.done()).toBe(true);
+ });
+
+ it("should handle Unicode and special characters", async () => {
+ const specialContent = "Hello 🌍! 안녕하세요 مرحبا 🚀 ñáéíóú";
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: specialContent },
+ finish_reason: "stop",
+ },
+ ],
+ },
+ ];
+
+ const stream = StreamUtil.from(...chunks);
+
+ const eventProcessor = vi.fn();
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+
+ expect(result).toBeDefined();
+ // Now single chunk with content should trigger eventProcessor
+ expect(eventProcessor).toHaveBeenCalledOnce();
+
+ const eventCall = eventProcessor.mock.calls[0]?.[0];
+ expect(eventCall.get()).toBe(specialContent);
+ });
+
+ it("should handle stream reader errors gracefully", async () => {
+ const chunks: ChatCompletionChunk[] = [
+ {
+ id: "test-id",
+ object: "chat.completion.chunk",
+ created: 1234567890,
+ model: "gpt-3.5-turbo",
+ choices: [
+ {
+ index: 0,
+ delta: { content: "Hello" },
+ finish_reason: null,
+ },
+ ],
+ },
+ ];
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ controller.enqueue(chunks[0]);
+ // Simulate an error in the stream
+ controller.error(new Error("Stream error"));
+ },
+ });
+
+ const eventProcessor = vi.fn();
+
+ await expect(reduceStreamingWithDispatch(stream, eventProcessor))
+ .rejects
+ .toThrow("Stream error");
+ });
+
+ it("should handle completely malformed chunks gracefully", async () => {
+ const malformedChunk = {
+ // Missing required fields
+ object: "chat.completion.chunk",
+ choices: [
+ {
+ // Missing index
+ delta: { content: "Hello" },
+ finish_reason: null,
+ },
+ ],
+ } as any;
+
+ const stream = new ReadableStream<ChatCompletionChunk>({
+ start(controller) {
+ controller.enqueue(malformedChunk);
+ controller.close();
+ },
+ });
+
+ const eventProcessor = vi.fn();
+
+ // Should not throw, but should handle gracefully
+ const result = await reduceStreamingWithDispatch(stream, eventProcessor);
+ expect(result).toBeDefined();
+ });
+ });
+ });
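For orientation, the surface this spec exercises is small: reduceStreamingWithDispatch(stream, eventProcessor) consumes a ReadableStream<ChatCompletionChunk> (or one built with StreamUtil.from), dispatches one event per choice index, and resolves to the reduced ChatCompletion. The following is a minimal usage sketch, not part of the diff; the relative import paths assume it lives next to package/src/utils, and the event object's get()/done()/join() methods are inferred from the test assertions above.

// Minimal sketch of the API the spec tests; assumptions noted above.
import type { ChatCompletionChunk } from "openai/resources";

import { reduceStreamingWithDispatch } from "./ChatGptCompletionStreamingUtil";
import { StreamUtil } from "./StreamUtil";

const chunk: ChatCompletionChunk = {
  id: "demo-id",
  object: "chat.completion.chunk",
  created: 1234567890,
  model: "gpt-3.5-turbo",
  choices: [
    { index: 0, delta: { content: "Hello World" }, finish_reason: "stop" },
  ],
};

// One event is dispatched per choice index; the stream is reduced into a
// single completion once every choice has finished.
const completion = await reduceStreamingWithDispatch(
  StreamUtil.from(chunk),
  (event) => {
    // get() returns the text accumulated so far for this choice;
    // done() reports completion and join() awaits it.
    console.log(event.get());
  },
);
console.log(completion.object); // "chat.completion"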