@langchain/core 0.2.1 → 0.2.3

This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
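The changed file is a streamEvents test suite: the update adds coverage for models that are invoked inside a chain rather than streamed, asserting that on_*_stream events are still emitted (one event per chunk for streaming-capable models, a single event otherwise). For orientation, here is a minimal consumption sketch of the streamEvents v2 API the new tests exercise; it is not part of the diff, and it reuses the FakeListChatModel helper and the { version: "v2" } option exactly as they appear in the tests below.

import { ChatPromptTemplate } from "@langchain/core/prompts";
import { FakeListChatModel } from "@langchain/core/utils/testing";

// Build a small chain shaped like the ones under test.
const template = ChatPromptTemplate.fromMessages([
    ["system", "You are Godzilla"],
    ["human", "{question}"],
]);
const model = new FakeListChatModel({ responses: ["ROAR"] });
const chain = template.pipe(model);

// streamEvents yields on_*_start / on_*_stream / on_*_end events for each
// runnable in the chain; the new tests assert that on_*_stream still fires
// when a model inside the chain is invoked rather than streamed.
const eventStream = chain.streamEvents({ question: "hello" }, { version: "v2" });
for await (const event of eventStream) {
    console.log(event.event, event.name);
}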
@@ -5,10 +5,12 @@ import { test } from "@jest/globals";
  import { z } from "zod";
  import { RunnableLambda, RunnableMap, RunnablePassthrough, RunnablePick, } from "../index.js";
  import { ChatPromptTemplate } from "../../prompts/chat.js";
- import { FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js";
- import { AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js";
+ import { FakeChatModel, FakeLLM, FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js";
+ import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js";
  import { DynamicStructuredTool, DynamicTool } from "../../tools.js";
  import { Document } from "../../documents/document.js";
+ import { PromptTemplate } from "../../prompts/prompt.js";
+ import { GenerationChunk } from "../../outputs.js";
  function reverse(s) {
      // Reverse a string.
      return s.split("").reverse().join("");
@@ -528,77 +530,141 @@ test("Runnable streamEvents method with llm", async () => {
      expect(events).toEqual([
          {
              event: "on_llm_start",
+             data: {
+                 input: "hello",
+             },
              name: "my_model",
+             tags: ["my_model"],
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
              metadata: {
                  a: "b",
              },
+         },
+         {
+             event: "on_llm_stream",
              data: {
-                 input: "hello",
+                 chunk: {
+                     text: "h",
+                 },
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["my_model"],
+             metadata: {
+                 a: "b",
              },
          },
          {
              event: "on_llm_stream",
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
+             name: "my_model",
+             tags: ["my_model"],
              metadata: {
                  a: "b",
              },
-             name: "my_model",
-             data: { chunk: "h" },
+             data: {
+                 chunk: "h",
+             },
          },
          {
              event: "on_llm_stream",
+             data: {
+                 chunk: {
+                     text: "e",
+                 },
+             },
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
+             name: "my_model",
+             tags: ["my_model"],
              metadata: {
                  a: "b",
              },
-             name: "my_model",
-             data: { chunk: "e" },
          },
          {
              event: "on_llm_stream",
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
+             name: "my_model",
+             tags: ["my_model"],
              metadata: {
                  a: "b",
              },
-             name: "my_model",
-             data: { chunk: "y" },
+             data: {
+                 chunk: "e",
+             },
          },
          {
              event: "on_llm_stream",
+             data: {
+                 chunk: {
+                     text: "y",
+                 },
+             },
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
+             name: "my_model",
+             tags: ["my_model"],
              metadata: {
                  a: "b",
              },
+         },
+         {
+             event: "on_llm_stream",
+             run_id: expect.any(String),
              name: "my_model",
-             data: { chunk: "!" },
+             tags: ["my_model"],
+             metadata: {
+                 a: "b",
+             },
+             data: {
+                 chunk: "y",
+             },
          },
          {
-             event: "on_llm_end",
+             event: "on_llm_stream",
+             data: {
+                 chunk: {
+                     text: "!",
+                 },
+             },
+             run_id: expect.any(String),
              name: "my_model",
+             tags: ["my_model"],
+             metadata: {
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
              run_id: expect.any(String),
-             tags: expect.arrayContaining(["my_model"]),
+             name: "my_model",
+             tags: ["my_model"],
              metadata: {
                  a: "b",
              },
+             data: {
+                 chunk: "!",
+             },
+         },
+         {
+             event: "on_llm_end",
              data: {
                  output: {
                      generations: [
                          [
                              {
-                                 generationInfo: {},
                                  text: "hey!",
+                                 generationInfo: {},
                              },
                          ],
                      ],
                      llmOutput: {},
                  },
              },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["my_model"],
+             metadata: {
+                 a: "b",
+             },
          },
      ]);
  });
@@ -810,6 +876,816 @@ test("Runnable streamEvents method with chat model chain", async () => {
          },
      ]);
  });
+ test("Chat model that supports streaming, but is invoked, should still emit on_stream events", async () => {
+     const template = ChatPromptTemplate.fromMessages([
+         ["system", "You are Godzilla"],
+         ["human", "{question}"],
+     ]).withConfig({
+         runName: "my_template",
+         tags: ["my_template"],
+     });
+     const model = new FakeListChatModel({
+         responses: ["ROAR"],
+     }).withConfig({
+         metadata: { a: "b" },
+         tags: ["my_model"],
+         runName: "my_model",
+     });
+     const chain = template
+         .pipe(async (val, config) => {
+             const result = await model.invoke(val, config);
+             return result;
+         })
+         .withConfig({
+             metadata: { foo: "bar" },
+             tags: ["my_chain"],
+             runName: "my_chain",
+         });
+     const events = [];
+     const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
+     for await (const event of eventStream) {
+         events.push(event);
+     }
+     expect(events).toEqual([
+         {
+             run_id: expect.any(String),
+             event: "on_chain_start",
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+         },
+         {
+             data: { input: { question: "hello" } },
+             event: "on_prompt_start",
+             metadata: { foo: "bar" },
+             name: "my_template",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]),
+         },
+         {
+             event: "on_prompt_end",
+             name: "my_template",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+                 output: await template.invoke({ question: "hello" }),
+             },
+         },
+         {
+             event: "on_chain_start",
+             data: {},
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chat_model_start",
+             name: "my_model",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             data: {
+                 input: {
+                     messages: [
+                         [new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
+                     ],
+                 },
+             },
+         },
+         {
+             event: "on_chat_model_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
+             metadata: {
+                 a: "b",
+                 foo: "bar",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             name: "my_model",
+             data: { chunk: new AIMessageChunk("R") },
+         },
+         {
+             event: "on_chat_model_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
+             metadata: {
+                 a: "b",
+                 foo: "bar",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             name: "my_model",
+             data: { chunk: new AIMessageChunk("O") },
+         },
+         {
+             event: "on_chat_model_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
+             metadata: {
+                 a: "b",
+                 foo: "bar",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             name: "my_model",
+             data: { chunk: new AIMessageChunk("A") },
+         },
+         {
+             event: "on_chat_model_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
+             metadata: {
+                 a: "b",
+                 foo: "bar",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             name: "my_model",
+             data: { chunk: new AIMessageChunk("R") },
+         },
+         {
+             event: "on_chat_model_end",
+             name: "my_model",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             data: {
+                 input: {
+                     messages: [
+                         [new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
+                     ],
+                 },
+                 output: new AIMessageChunk("ROAR"),
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             name: "RunnableLambda",
+             data: { chunk: new AIMessageChunk("ROAR") },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             name: "my_chain",
+             data: { chunk: new AIMessageChunk("ROAR") },
+         },
+         {
+             event: "on_chain_end",
+             name: "RunnableLambda",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: await template.invoke({ question: "hello" }),
+                 output: new AIMessageChunk("ROAR"),
+             },
+         },
+         {
+             event: "on_chain_end",
+             name: "my_chain",
+             run_id: expect.any(String),
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 output: new AIMessageChunk("ROAR"),
+             },
+         },
+     ]);
+ });
+ test("Chat model that doesn't support streaming, but is invoked, should emit one on_stream event", async () => {
+     const template = ChatPromptTemplate.fromMessages([
+         ["system", "You are Godzilla"],
+         ["human", "{question}"],
+     ]).withConfig({
+         runName: "my_template",
+         tags: ["my_template"],
+     });
+     const model = new FakeChatModel({}).withConfig({
+         metadata: { a: "b" },
+         tags: ["my_model"],
+         runName: "my_model",
+     });
+     const chain = template
+         .pipe(async (val, config) => {
+             const result = await model.invoke(val, config);
+             return result;
+         })
+         .withConfig({
+             metadata: { foo: "bar" },
+             tags: ["my_chain"],
+             runName: "my_chain",
+         });
+     const events = [];
+     const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
+     for await (const event of eventStream) {
+         events.push(event);
+     }
+     expect(events).toEqual([
+         {
+             run_id: expect.any(String),
+             event: "on_chain_start",
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+         },
+         {
+             data: { input: { question: "hello" } },
+             event: "on_prompt_start",
+             metadata: { foo: "bar" },
+             name: "my_template",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]),
+         },
+         {
+             event: "on_prompt_end",
+             name: "my_template",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+                 output: await template.invoke({ question: "hello" }),
+             },
+         },
+         {
+             event: "on_chain_start",
+             data: {},
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chat_model_start",
+             name: "my_model",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             data: {
+                 input: {
+                     messages: [
+                         [new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
+                     ],
+                 },
+             },
+         },
+         {
+             event: "on_chat_model_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
+             metadata: {
+                 a: "b",
+                 foo: "bar",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             name: "my_model",
+             data: { chunk: new AIMessageChunk("You are Godzilla\nhello") },
+         },
+         {
+             event: "on_chat_model_end",
+             name: "my_model",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+                 ls_model_type: "chat",
+                 ls_stop: undefined,
+             },
+             data: {
+                 input: {
+                     messages: [
+                         [new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
+                     ],
+                 },
+                 output: new AIMessage("You are Godzilla\nhello"),
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             name: "RunnableLambda",
+             data: { chunk: new AIMessage("You are Godzilla\nhello") },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             name: "my_chain",
+             data: { chunk: new AIMessage("You are Godzilla\nhello") },
+         },
+         {
+             event: "on_chain_end",
+             name: "RunnableLambda",
+             run_id: expect.any(String),
+             tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 input: await template.invoke({ question: "hello" }),
+                 output: new AIMessage("You are Godzilla\nhello"),
+             },
+         },
+         {
+             event: "on_chain_end",
+             name: "my_chain",
+             run_id: expect.any(String),
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 output: new AIMessage("You are Godzilla\nhello"),
+             },
+         },
+     ]);
+ });
+ test("LLM that supports streaming, but is invoked, should still emit on_stream events", async () => {
+     const template = PromptTemplate.fromTemplate(`You are Godzilla\n{question}`).withConfig({
+         runName: "my_template",
+         tags: ["my_template"],
+     });
+     const model = new FakeStreamingLLM({
+         responses: ["ROAR"],
+     }).withConfig({
+         metadata: { a: "b" },
+         tags: ["my_model"],
+         runName: "my_model",
+     });
+     const chain = template
+         .pipe(async (val, config) => {
+             const result = await model.invoke(val, config);
+             return result;
+         })
+         .withConfig({
+             metadata: { foo: "bar" },
+             tags: ["my_chain"],
+             runName: "my_chain",
+         });
+     const events = [];
+     const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
+     for await (const event of eventStream) {
+         events.push(event);
+     }
+     expect(events).toEqual([
+         {
+             event: "on_chain_start",
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+             name: "my_chain",
+             tags: ["my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_prompt_start",
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+             name: "my_template",
+             tags: ["seq:step:1", "my_template", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_prompt_end",
+             data: {
+                 output: await template.invoke({ question: "hello" }),
+                 input: {
+                     question: "hello",
+                 },
+             },
+             run_id: expect.any(String),
+             name: "my_template",
+             tags: ["seq:step:1", "my_template", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chain_start",
+             data: {},
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_llm_start",
+             data: {
+                 input: {
+                     prompts: ["You are Godzilla\nhello"],
+                 },
+             },
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
+             data: {
+                 chunk: new GenerationChunk({
+                     text: "R",
+                 }),
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
+             data: {
+                 chunk: new GenerationChunk({
+                     text: "O",
+                 }),
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
+             data: {
+                 chunk: new GenerationChunk({
+                     text: "A",
+                 }),
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
+             data: {
+                 chunk: new GenerationChunk({
+                     text: "R",
+                 }),
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_end",
+             data: {
+                 output: {
+                     generations: [
+                         [
+                             {
+                                 text: "ROAR",
+                                 generationInfo: {},
+                             },
+                         ],
+                     ],
+                     llmOutput: {},
+                 },
+                 input: {
+                     prompts: ["You are Godzilla\nhello"],
+                 },
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 chunk: "ROAR",
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 chunk: "ROAR",
+             },
+         },
+         {
+             event: "on_chain_end",
+             data: {
+                 output: "ROAR",
+                 input: await template.invoke({ question: "hello" }),
+             },
+             run_id: expect.any(String),
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chain_end",
+             data: {
+                 output: "ROAR",
+             },
+             run_id: expect.any(String),
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+     ]);
+ });
+ test("LLM that doesn't support streaming, but is invoked, should emit one on_stream event", async () => {
+     const template = PromptTemplate.fromTemplate(`You are Godzilla\n{question}`).withConfig({
+         runName: "my_template",
+         tags: ["my_template"],
+     });
+     const model = new FakeLLM({}).withConfig({
+         metadata: { a: "b" },
+         tags: ["my_model"],
+         runName: "my_model",
+     });
+     const chain = template
+         .pipe(async (val, config) => {
+             const result = await model.invoke(val, config);
+             return result;
+         })
+         .withConfig({
+             metadata: { foo: "bar" },
+             tags: ["my_chain"],
+             runName: "my_chain",
+         });
+     const events = [];
+     const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
+     for await (const event of eventStream) {
+         events.push(event);
+     }
+     expect(events).toEqual([
+         {
+             event: "on_chain_start",
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+             name: "my_chain",
+             tags: ["my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_prompt_start",
+             data: {
+                 input: {
+                     question: "hello",
+                 },
+             },
+             name: "my_template",
+             tags: ["seq:step:1", "my_template", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_prompt_end",
+             data: {
+                 output: await template.invoke({ question: "hello" }),
+                 input: {
+                     question: "hello",
+                 },
+             },
+             run_id: expect.any(String),
+             name: "my_template",
+             tags: ["seq:step:1", "my_template", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chain_start",
+             data: {},
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_llm_start",
+             data: {
+                 input: {
+                     prompts: ["You are Godzilla\nhello"],
+                 },
+             },
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             run_id: expect.any(String),
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_stream",
+             data: {
+                 chunk: new GenerationChunk({
+                     text: "You are Godzilla\nhello",
+                 }),
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_llm_end",
+             data: {
+                 output: {
+                     generations: [
+                         [
+                             {
+                                 text: "You are Godzilla\nhello",
+                                 generationInfo: undefined,
+                             },
+                         ],
+                     ],
+                     llmOutput: {},
+                 },
+                 input: {
+                     prompts: ["You are Godzilla\nhello"],
+                 },
+             },
+             run_id: expect.any(String),
+             name: "my_model",
+             tags: ["seq:step:2", "my_model", "my_chain"],
+             metadata: {
+                 foo: "bar",
+                 a: "b",
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 chunk: "You are Godzilla\nhello",
+             },
+         },
+         {
+             event: "on_chain_stream",
+             run_id: expect.any(String),
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+             data: {
+                 chunk: "You are Godzilla\nhello",
+             },
+         },
+         {
+             event: "on_chain_end",
+             data: {
+                 output: "You are Godzilla\nhello",
+                 input: await template.invoke({ question: "hello" }),
+             },
+             run_id: expect.any(String),
+             name: "RunnableLambda",
+             tags: ["seq:step:2", "my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+         {
+             event: "on_chain_end",
+             data: {
+                 output: "You are Godzilla\nhello",
+             },
+             run_id: expect.any(String),
+             name: "my_chain",
+             tags: ["my_chain"],
+             metadata: {
+                 foo: "bar",
+             },
+         },
+     ]);
+ });
  test("Runnable streamEvents method with simple tools", async () => {
      const tool = new DynamicTool({
          func: async () => "hello",