promptlayer 1.0.61 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/README.md +31 -0
  2. package/dist/claude-agents.d.mts +20 -0
  3. package/dist/claude-agents.d.ts +20 -0
  4. package/dist/claude-agents.js +2 -0
  5. package/dist/claude-agents.js.map +1 -0
  6. package/dist/esm/chunk-DFBRFJOL.js +2 -0
  7. package/dist/esm/chunk-DFBRFJOL.js.map +1 -0
  8. package/dist/esm/claude-agents.js +2 -0
  9. package/dist/esm/claude-agents.js.map +1 -0
  10. package/dist/esm/index.js +2 -2
  11. package/dist/esm/index.js.map +1 -1
  12. package/dist/esm/openai-agents.js +3 -0
  13. package/dist/esm/openai-agents.js.map +1 -0
  14. package/dist/index.js +2 -2
  15. package/dist/index.js.map +1 -1
  16. package/dist/openai-agents.d.mts +42 -0
  17. package/dist/openai-agents.d.ts +42 -0
  18. package/dist/openai-agents.js +3 -0
  19. package/dist/openai-agents.js.map +1 -0
  20. package/package.json +40 -2
  21. package/vendor/claude-agents/trace/.claude-plugin/plugin.json +8 -0
  22. package/vendor/claude-agents/trace/hooks/hook_utils.py +38 -0
  23. package/vendor/claude-agents/trace/hooks/hooks.json +60 -0
  24. package/vendor/claude-agents/trace/hooks/lib.sh +577 -0
  25. package/vendor/claude-agents/trace/hooks/parse_stop_transcript.py +375 -0
  26. package/vendor/claude-agents/trace/hooks/post_tool_use.sh +41 -0
  27. package/vendor/claude-agents/trace/hooks/session_end.sh +37 -0
  28. package/vendor/claude-agents/trace/hooks/session_start.sh +57 -0
  29. package/vendor/claude-agents/trace/hooks/stop_hook.sh +123 -0
  30. package/vendor/claude-agents/trace/hooks/user_prompt_submit.sh +25 -0
  31. package/vendor/claude-agents/vendor_metadata.json +5 -0
  32. package/.github/CODEOWNERS +0 -1
  33. package/.github/workflows/node.js.yml +0 -30
  34. package/.github/workflows/npm-publish.yml +0 -35
  35. package/src/groups.ts +0 -16
  36. package/src/index.ts +0 -383
  37. package/src/promptlayer.ts +0 -125
  38. package/src/run-error-tracking.test.ts +0 -146
  39. package/src/span-exporter.ts +0 -120
  40. package/src/span-wrapper.ts +0 -51
  41. package/src/templates.ts +0 -37
  42. package/src/tracing.ts +0 -20
  43. package/src/track.ts +0 -84
  44. package/src/types.ts +0 -689
  45. package/src/utils/blueprint-builder.test.ts +0 -727
  46. package/src/utils/blueprint-builder.ts +0 -1453
  47. package/src/utils/errors.test.ts +0 -68
  48. package/src/utils/errors.ts +0 -62
  49. package/src/utils/streaming.test.ts +0 -498
  50. package/src/utils/streaming.ts +0 -1402
  51. package/src/utils/utils.ts +0 -1228
  52. package/tsconfig.json +0 -115
  53. package/tsup.config.ts +0 -17
  54. package/vitest.config.ts +0 -6
@@ -1,68 +0,0 @@
1
- import { describe, it, expect } from "vitest";
2
- import { categorizeError, ErrorType } from "@/utils/errors";
3
- import {
4
- RateLimitError,
5
- AuthenticationError,
6
- APIConnectionTimeoutError,
7
- BadRequestError,
8
- InternalServerError,
9
- } from "openai";
10
-
11
- describe("categorizeError", () => {
12
- // Branch: statusCode === 429 (+ className matches /ratelimit/i)
13
- it("returns PROVIDER_RATE_LIMIT for OpenAI RateLimitError", () => {
14
- const err = new RateLimitError(429, undefined, "Rate limit exceeded", undefined);
15
- expect(categorizeError(err)).toBe(ErrorType.PROVIDER_RATE_LIMIT);
16
- });
17
-
18
- // Branch: className matches /timeout/i
19
- it("returns PROVIDER_TIMEOUT for OpenAI APIConnectionTimeoutError", () => {
20
- const err = new APIConnectionTimeoutError({ message: "Request timed out." });
21
- expect(categorizeError(err)).toBe(ErrorType.PROVIDER_TIMEOUT);
22
- });
23
-
24
- // Branch: statusCode === 401 (+ className matches /authentication/i)
25
- it("returns PROVIDER_AUTH_ERROR for OpenAI AuthenticationError", () => {
26
- const err = new AuthenticationError(401, undefined, "Invalid API key", undefined);
27
- expect(categorizeError(err)).toBe(ErrorType.PROVIDER_AUTH_ERROR);
28
- });
29
-
30
- // Branch: message includes "quota"
31
- it("returns PROVIDER_QUOTA_LIMIT when message contains quota", () => {
32
- expect(categorizeError(new Error("You exceeded your quota"))).toBe(
33
- ErrorType.PROVIDER_QUOTA_LIMIT
34
- );
35
- });
36
-
37
- // Branch: message includes "timeout"
38
- it("returns PROVIDER_TIMEOUT when message contains timeout", () => {
39
- expect(categorizeError(new Error("Request timeout"))).toBe(
40
- ErrorType.PROVIDER_TIMEOUT
41
- );
42
- });
43
-
44
- // Branch: message includes "timed out"
45
- it("returns PROVIDER_TIMEOUT when message contains timed out", () => {
46
- expect(categorizeError(new Error("Connection timed out"))).toBe(
47
- ErrorType.PROVIDER_TIMEOUT
48
- );
49
- });
50
-
51
- // Branch: statusCode defined but no earlier match → PROVIDER_ERROR
52
- it("returns PROVIDER_ERROR for OpenAI InternalServerError", () => {
53
- const err = new InternalServerError(500, undefined, "Internal server error", undefined);
54
- expect(categorizeError(err)).toBe(ErrorType.PROVIDER_ERROR);
55
- });
56
-
57
- // Branch: fallthrough — no status, no matching class/message
58
- it("returns UNKNOWN_ERROR for a plain Error", () => {
59
- expect(categorizeError(new Error("something broke"))).toBe(
60
- ErrorType.UNKNOWN_ERROR
61
- );
62
- });
63
-
64
- // Branch: non-Error thrown → String(error) path, getClassName returns ""
65
- it("returns UNKNOWN_ERROR for a non-Error value", () => {
66
- expect(categorizeError("oops")).toBe(ErrorType.UNKNOWN_ERROR);
67
- });
68
- });
@@ -1,62 +0,0 @@
1
- export enum ErrorType {
2
- PROVIDER_RATE_LIMIT = "PROVIDER_RATE_LIMIT",
3
- PROVIDER_QUOTA_LIMIT = "PROVIDER_QUOTA_LIMIT",
4
- PROVIDER_TIMEOUT = "PROVIDER_TIMEOUT",
5
- PROVIDER_AUTH_ERROR = "PROVIDER_AUTH_ERROR",
6
- PROVIDER_ERROR = "PROVIDER_ERROR",
7
- UNKNOWN_ERROR = "UNKNOWN_ERROR",
8
- }
9
-
10
- function getStatusCode(error: unknown): number | undefined {
11
- if (
12
- error &&
13
- typeof error === "object" &&
14
- "status" in error &&
15
- typeof (error as any).status === "number"
16
- ) {
17
- return (error as any).status;
18
- }
19
- return undefined;
20
- }
21
-
22
- function getClassName(error: unknown): string {
23
- if (error && typeof error === "object" && error.constructor) {
24
- return error.constructor.name;
25
- }
26
- return "";
27
- }
28
-
29
- export function categorizeError(error: unknown): ErrorType {
30
- const statusCode = getStatusCode(error);
31
- const className = getClassName(error);
32
- const message =
33
- error instanceof Error
34
- ? error.message.toLowerCase()
35
- : String(error).toLowerCase();
36
-
37
- if (statusCode === 429 || /ratelimit/i.test(className)) {
38
- return ErrorType.PROVIDER_RATE_LIMIT;
39
- }
40
-
41
- if (/timeout/i.test(className)) {
42
- return ErrorType.PROVIDER_TIMEOUT;
43
- }
44
-
45
- if (statusCode === 401 || /authentication/i.test(className)) {
46
- return ErrorType.PROVIDER_AUTH_ERROR;
47
- }
48
-
49
- if (message.includes("quota")) {
50
- return ErrorType.PROVIDER_QUOTA_LIMIT;
51
- }
52
-
53
- if (message.includes("timeout") || message.includes("timed out")) {
54
- return ErrorType.PROVIDER_TIMEOUT;
55
- }
56
-
57
- if (statusCode !== undefined) {
58
- return ErrorType.PROVIDER_ERROR;
59
- }
60
-
61
- return ErrorType.UNKNOWN_ERROR;
62
- }
@@ -1,498 +0,0 @@
1
- import { describe, it, expect } from "vitest";
2
- import {
3
- anthropicStreamMessage,
4
- googleStreamChat,
5
- bedrockStreamMessage,
6
- openaiStreamChat,
7
- } from "@/utils/streaming";
8
-
9
// Tests for anthropicStreamMessage: reassembling an Anthropic Messages API
// event stream (message_start, content_block_start/delta/stop) into a single
// non-streaming message object. Fixtures are cast `as any` because they are
// hand-built minimal shapes, not full SDK event types.
describe("anthropicStreamMessage", () => {
  it("merges server_tool_use block with input_json_delta into content", () => {
    const results = [
      {
        type: "message_start" as const,
        message: {
          id: "msg_01",
          model: "claude-sonnet-4-6",
          type: "message" as const,
          role: "assistant" as const,
          content: [],
          stop_reason: null,
          stop_sequence: null,
          usage: { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0, server_tool_use: null, service_tier: null },
        },
      },
      {
        type: "content_block_start" as const,
        index: 0,
        content_block: {
          type: "server_tool_use" as const,
          id: "srvtoolu_01",
          name: "bash_code_execution",
          input: {},
          caller: { type: "direct" as const },
        },
      },
      {
        type: "content_block_delta" as const,
        index: 0,
        delta: { type: "input_json_delta" as const, partial_json: '{"command":"echo hello"}' },
      },
      {
        type: "content_block_stop" as const,
        index: 0,
      },
    ];
    const message = anthropicStreamMessage(results as any);
    expect(message.content).toHaveLength(1);
    // The accumulated partial_json should be parsed into the block's input.
    expect(message.content![0]).toMatchObject({
      type: "server_tool_use",
      id: "srvtoolu_01",
      name: "bash_code_execution",
      input: { command: "echo hello" },
    });
  });

  it("merges tool_use block with input_json_delta into content", () => {
    const results = [
      {
        type: "message_start" as const,
        message: {
          id: "msg_01",
          model: "claude-sonnet-4-6",
          type: "message" as const,
          role: "assistant" as const,
          content: [],
          stop_reason: null,
          stop_sequence: null,
          usage: { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0, server_tool_use: null, service_tier: null },
        },
      },
      {
        type: "content_block_start" as const,
        index: 0,
        content_block: {
          type: "tool_use" as const,
          id: "toolu_01",
          name: "get_weather",
          input: {},
        },
      },
      {
        type: "content_block_delta" as const,
        index: 0,
        delta: { type: "input_json_delta" as const, partial_json: '{"location":"NYC"}' },
      },
      {
        type: "content_block_stop" as const,
        index: 0,
      },
    ];
    const message = anthropicStreamMessage(results as any);
    expect(message.content).toHaveLength(1);
    expect(message.content![0]).toMatchObject({
      type: "tool_use",
      id: "toolu_01",
      name: "get_weather",
      input: { location: "NYC" },
    });
  });

  it("pushes bash_code_execution_tool_result block on content_block_stop", () => {
    const results = [
      {
        type: "message_start" as const,
        message: {
          id: "msg_01",
          model: "claude-sonnet-4-6",
          type: "message" as const,
          role: "assistant" as const,
          content: [],
          stop_reason: null,
          stop_sequence: null,
          usage: { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0, server_tool_use: null, service_tier: null },
        },
      },
      {
        // NOTE(review): index 1 with no block at index 0 — presumably the
        // merger keys blocks by stream index, not array position; confirm.
        type: "content_block_start" as const,
        index: 1,
        content_block: {
          type: "bash_code_execution_tool_result" as const,
          tool_use_id: "srvtoolu_01F9YUsLV5DCRx2JbnBXL1hL",
          content: {
            type: "bash_code_execution_result",
            stdout: "First 10 Prime Numbers:\n1. 2\n2. 3\n",
            stderr: "",
            return_code: 0,
            content: [],
          },
        },
      },
      {
        type: "content_block_stop" as const,
        index: 1,
      },
    ];
    const message = anthropicStreamMessage(results as any);
    expect(message.content).toHaveLength(1);
    expect(message.content![0]).toMatchObject({
      type: "bash_code_execution_tool_result",
      tool_use_id: "srvtoolu_01F9YUsLV5DCRx2JbnBXL1hL",
      content: {
        type: "bash_code_execution_result",
        stdout: "First 10 Prime Numbers:\n1. 2\n2. 3\n",
        stderr: "",
        return_code: 0,
        content: [],
      },
    });
  });

  it("attaches citations_delta to text block by index", () => {
    const results = [
      {
        type: "message_start" as const,
        message: {
          id: "msg_01",
          model: "claude-sonnet-4-6",
          type: "message" as const,
          role: "assistant" as const,
          content: [],
          stop_reason: null,
          stop_sequence: null,
          usage: { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0, server_tool_use: null, service_tier: null },
        },
      },
      {
        type: "content_block_start" as const,
        index: 0,
        content_block: { type: "text" as const },
      },
      {
        type: "content_block_delta" as const,
        index: 0,
        delta: { type: "text_delta" as const, text: "The rate is 279.25." },
      },
      {
        type: "content_block_delta" as const,
        index: 0,
        delta: {
          type: "citations_delta" as const,
          citation: {
            type: "web_search_result_location" as const,
            cited_text: "The rate is 279.25.",
            url: "https://example.com",
            title: "Example",
            encrypted_index: "enc",
          },
        },
      },
      {
        type: "content_block_stop" as const,
        index: 0,
      },
    ];
    const message = anthropicStreamMessage(results as any);
    expect(message.content).toHaveLength(1);
    // Citations are surfaced as url_citation annotations on the text block.
    expect(message.content![0]).toMatchObject({
      type: "text",
      text: "The rate is 279.25.",
      citations: null,
      annotations: [
        {
          type: "url_citation",
          url: "https://example.com",
          title: "Example",
          cited_text: "The rate is 279.25.",
          encrypted_index: "enc",
        },
      ],
    });
  });

  it("merges multiple blocks in order (server_tool_use, bash result, text)", () => {
    const results = [
      {
        type: "message_start" as const,
        message: {
          id: "msg_01",
          model: "claude-sonnet-4-6",
          type: "message" as const,
          role: "assistant" as const,
          content: [],
          stop_reason: null,
          stop_sequence: null,
          usage: { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0, server_tool_use: null, service_tier: null },
        },
      },
      {
        type: "content_block_start" as const,
        index: 0,
        content_block: {
          type: "server_tool_use" as const,
          id: "srvtoolu_01",
          name: "bash_code_execution",
          input: {},
          caller: { type: "direct" as const },
        },
      },
      {
        type: "content_block_delta" as const,
        index: 0,
        delta: { type: "input_json_delta" as const, partial_json: '{"command":"echo hi"}' },
      },
      { type: "content_block_stop" as const, index: 0 },
      {
        type: "content_block_start" as const,
        index: 1,
        content_block: {
          type: "bash_code_execution_tool_result" as const,
          tool_use_id: "srvtoolu_01",
          content: { type: "bash_code_execution_result", stdout: "hi\n", stderr: "", return_code: 0, content: [] },
        },
      },
      { type: "content_block_stop" as const, index: 1 },
      {
        type: "content_block_start" as const,
        index: 2,
        content_block: { type: "text" as const },
      },
      {
        type: "content_block_delta" as const,
        index: 2,
        delta: { type: "text_delta" as const, text: "Done!" },
      },
      { type: "content_block_stop" as const, index: 2 },
    ];
    const message = anthropicStreamMessage(results as any);
    // All three blocks should survive, in stream-index order.
    expect(message.content).toHaveLength(3);
    expect(message.content![0]).toMatchObject({ type: "server_tool_use", input: { command: "echo hi" } });
    expect(message.content![1]).toMatchObject({ type: "bash_code_execution_tool_result", tool_use_id: "srvtoolu_01" });
    expect(message.content![2]).toMatchObject({ type: "text", text: "Done!" });
  });
});
274
-
275
// Tests for googleStreamChat: merging Gemini stream chunks (candidates ->
// content.parts) into one response. Text parts are concatenated; thought and
// functionCall parts are kept as separate parts.
describe("googleStreamChat", () => {
  it("merges text parts from multiple chunks", () => {
    const results = [
      {
        candidates: [
          {
            content: { parts: [{ text: "Hello " }] },
          },
        ],
      },
      {
        candidates: [
          {
            content: { parts: [{ text: "world" }] },
          },
        ],
      },
    ];
    const response = googleStreamChat(results as any);
    expect(response.candidates).toHaveLength(1);
    // Two text chunks collapse into a single concatenated part.
    expect(response.candidates![0].content.parts).toHaveLength(1);
    expect(response.candidates![0].content.parts[0].text).toBe("Hello world");
  });

  it("merges thought and regular text parts", () => {
    const results = [
      {
        candidates: [
          {
            content: {
              parts: [
                { text: "Reasoning.", thought: true, thoughtSignature: "sig1" },
                { text: "Answer." },
              ],
            },
          },
        ],
      },
    ];
    const response = googleStreamChat(results as any);
    const parts = response.candidates![0].content.parts;
    // Thought parts must not be folded into the visible answer text.
    expect(parts).toHaveLength(2);
    expect(parts[0]).toMatchObject({ text: "Reasoning.", thought: true, thoughtSignature: "sig1" });
    expect(parts[1]).toMatchObject({ text: "Answer." });
  });

  it("collects functionCall parts", () => {
    const results = [
      {
        candidates: [
          {
            content: {
              parts: [
                { functionCall: { id: "fc_01", name: "get_weather", args: { location: "NYC" } } },
              ],
            },
          },
        ],
      },
    ];
    const response = googleStreamChat(results as any);
    const parts = response.candidates![0].content.parts;
    expect(parts).toHaveLength(1);
    expect(parts[0].functionCall).toMatchObject({
      id: "fc_01",
      name: "get_weather",
      args: { location: "NYC" },
    });
  });
});
345
-
346
// Tests for bedrockStreamMessage: merging AWS Bedrock Converse stream events
// (contentBlockStart/Delta/Stop) into a single output message.
describe("bedrockStreamMessage", () => {
  it("merges text deltas into single text block", () => {
    const results = [
      {
        contentBlockDelta: { delta: { text: "Hello " } },
      },
      {
        contentBlockDelta: { delta: { text: "world" } },
      },
      {
        contentBlockStop: {},
      },
    ];
    const response = bedrockStreamMessage(results as any);
    expect(response.output.message.content).toHaveLength(1);
    expect(response.output.message.content[0]).toMatchObject({ text: "Hello world" });
  });

  it("merges toolUse deltas and parses input JSON", () => {
    const results = [
      {
        contentBlockStart: {
          start: {
            toolUse: { toolUseId: "toolu_01", name: "search" },
          },
        },
      },
      {
        contentBlockDelta: {
          delta: {
            toolUse: { input: '{"query":"test"}' },
          },
        },
      },
      {
        contentBlockStop: {},
      },
    ];
    const response = bedrockStreamMessage(results as any);
    expect(response.output.message.content).toHaveLength(1);
    // The streamed JSON string should arrive parsed into an object.
    expect(response.output.message.content[0].toolUse).toMatchObject({
      toolUseId: "toolu_01",
      name: "search",
      input: { query: "test" },
    });
  });

  it("merges reasoningContent into thinking block", () => {
    const results = [
      {
        contentBlockDelta: {
          delta: {
            reasoningContent: { text: "Think ", signature: "" },
          },
        },
      },
      {
        contentBlockDelta: {
          delta: {
            reasoningContent: { signature: "sig123" },
          },
        },
      },
      {
        contentBlockStop: {},
      },
    ];
    const response = bedrockStreamMessage(results as any);
    expect(response.output.message.content).toHaveLength(1);
    // Text from the first delta and the signature from the second merge into
    // one reasoningText; a later non-empty signature wins over the empty one.
    expect(response.output.message.content[0].reasoningContent.reasoningText).toMatchObject({
      text: "Think ",
      signature: "sig123",
    });
  });
});
421
-
422
// Tests for openaiStreamChat: merging OpenAI chat-completion chunks into a
// single ChatCompletion-shaped response (content concatenation, tool_call
// assembly across chunks, and usage from the final chunk).
describe("openaiStreamChat", () => {
  it("merges choice deltas into single message", () => {
    const results = [
      {
        id: "chatcmpl-1",
        model: "gpt-4o",
        created: 123,
        choices: [{ index: 0, delta: { content: "Hello " }, finish_reason: null }],
        system_fingerprint: null,
      },
      {
        id: "chatcmpl-1",
        model: "gpt-4o",
        created: 123,
        choices: [{ index: 0, delta: { content: "world" }, finish_reason: null }],
        system_fingerprint: null,
      },
      {
        // Final chunk: empty delta, finish_reason set, and usage attached.
        id: "chatcmpl-1",
        model: "gpt-4o",
        created: 123,
        choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
        system_fingerprint: null,
        usage: { prompt_tokens: 10, completion_tokens: 2 },
      },
    ];
    const response = openaiStreamChat(results as any);
    expect(response.choices).toHaveLength(1);
    expect(response.choices[0].message?.content).toBe("Hello world");
    expect(response.usage?.completion_tokens).toBe(2);
  });

  it("merges tool_calls from deltas (id in first chunk, arguments in later chunk)", () => {
    const results = [
      {
        id: "chatcmpl-1",
        model: "gpt-4o",
        created: 123,
        choices: [
          {
            index: 0,
            delta: {
              tool_calls: [
                { id: "call_01", index: 0, function: { name: "get_weather", arguments: "" } },
              ],
            },
            finish_reason: null,
          },
        ],
        system_fingerprint: null,
      },
      {
        // Continuation chunk: no id, arguments streamed; matched by index 0.
        id: "chatcmpl-1",
        model: "gpt-4o",
        created: 123,
        choices: [
          {
            index: 0,
            delta: {
              tool_calls: [
                { index: 0, function: { name: "", arguments: '{"city":"Boston"}' } },
              ],
            },
            finish_reason: null,
          },
        ],
        system_fingerprint: null,
      },
    ];
    const response = openaiStreamChat(results as any);
    expect(response.choices[0].message?.tool_calls).toHaveLength(1);
    expect(response.choices[0].message?.tool_calls![0]).toMatchObject({
      id: "call_01",
      function: { name: "get_weather", arguments: '{"city":"Boston"}' },
    });
  });
});