@jupyterlite/ai 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/README.md +48 -9
  2. package/lib/chat-handler.d.ts +15 -3
  3. package/lib/chat-handler.js +80 -28
  4. package/lib/completion-provider.d.ts +5 -18
  5. package/lib/completion-provider.js +8 -34
  6. package/lib/icons.d.ts +2 -0
  7. package/lib/icons.js +15 -0
  8. package/lib/index.d.ts +3 -2
  9. package/lib/index.js +79 -22
  10. package/lib/llm-models/anthropic-completer.d.ts +19 -0
  11. package/lib/llm-models/anthropic-completer.js +57 -0
  12. package/lib/llm-models/base-completer.d.ts +6 -2
  13. package/lib/llm-models/chrome-completer.d.ts +19 -0
  14. package/lib/llm-models/chrome-completer.js +67 -0
  15. package/lib/llm-models/codestral-completer.d.ts +9 -8
  16. package/lib/llm-models/codestral-completer.js +37 -54
  17. package/lib/llm-models/index.d.ts +3 -2
  18. package/lib/llm-models/index.js +42 -2
  19. package/lib/llm-models/openai-completer.d.ts +19 -0
  20. package/lib/llm-models/openai-completer.js +51 -0
  21. package/lib/provider.d.ts +54 -15
  22. package/lib/provider.js +123 -41
  23. package/lib/settings/instructions.d.ts +2 -0
  24. package/lib/settings/instructions.js +44 -0
  25. package/lib/settings/panel.d.ts +70 -0
  26. package/lib/settings/panel.js +190 -0
  27. package/lib/settings/schemas/_generated/Anthropic.json +70 -0
  28. package/lib/settings/schemas/_generated/ChromeAI.json +21 -0
  29. package/lib/settings/schemas/_generated/MistralAI.json +75 -0
  30. package/lib/settings/schemas/_generated/OpenAI.json +668 -0
  31. package/lib/settings/schemas/base.json +7 -0
  32. package/lib/settings/schemas/index.d.ts +3 -0
  33. package/lib/settings/schemas/index.js +11 -0
  34. package/lib/slash-commands.d.ts +16 -0
  35. package/lib/slash-commands.js +25 -0
  36. package/lib/tokens.d.ts +103 -0
  37. package/lib/tokens.js +5 -0
  38. package/package.json +27 -104
  39. package/schema/chat.json +8 -0
  40. package/schema/provider-registry.json +17 -0
  41. package/src/chat-handler.ts +103 -43
  42. package/src/completion-provider.ts +13 -37
  43. package/src/icons.ts +18 -0
  44. package/src/index.ts +101 -24
  45. package/src/llm-models/anthropic-completer.ts +75 -0
  46. package/src/llm-models/base-completer.ts +7 -2
  47. package/src/llm-models/chrome-completer.ts +88 -0
  48. package/src/llm-models/codestral-completer.ts +43 -69
  49. package/src/llm-models/index.ts +49 -2
  50. package/src/llm-models/openai-completer.ts +67 -0
  51. package/src/llm-models/svg.d.ts +9 -0
  52. package/src/provider.ts +138 -43
  53. package/src/settings/instructions.ts +48 -0
  54. package/src/settings/panel.tsx +257 -0
  55. package/src/settings/schemas/index.ts +15 -0
  56. package/src/slash-commands.tsx +55 -0
  57. package/src/tokens.ts +112 -0
  58. package/style/base.css +4 -0
  59. package/style/icons/jupyternaut-lite.svg +7 -0
  60. package/lib/llm-models/utils.d.ts +0 -15
  61. package/lib/llm-models/utils.js +0 -29
  62. package/lib/token.d.ts +0 -13
  63. package/lib/token.js +0 -2
  64. package/schema/ai-provider.json +0 -21
  65. package/src/llm-models/utils.ts +0 -41
  66. package/src/token.ts +0 -19
@@ -0,0 +1,668 @@
1
+ {
2
+ "$schema": "http://json-schema.org/draft-07/schema#",
3
+ "type": "object",
4
+ "additionalProperties": false,
5
+ "properties": {
6
+ "disableStreaming": {
7
+ "type": "boolean",
8
+ "description": "Whether to disable streaming.\n\nIf streaming is bypassed, then `stream()` will defer to `invoke()`.\n\n- If true, will always bypass streaming case.\n- If false (default), will always use streaming case if available."
9
+ },
10
+ "logprobs": {
11
+ "type": "boolean",
12
+ "description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message."
13
+ },
14
+ "topLogprobs": {
15
+ "type": "number",
16
+ "description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used."
17
+ },
18
+ "prefixMessages": {
19
+ "type": "array",
20
+ "items": {
21
+ "anyOf": [
22
+ {
23
+ "type": "object",
24
+ "properties": {
25
+ "content": {
26
+ "anyOf": [
27
+ {
28
+ "type": "string"
29
+ },
30
+ {
31
+ "type": "array",
32
+ "items": {
33
+ "type": "object",
34
+ "properties": {
35
+ "text": {
36
+ "type": "string",
37
+ "description": "The text content."
38
+ },
39
+ "type": {
40
+ "type": "string",
41
+ "const": "text",
42
+ "description": "The type of the content part."
43
+ }
44
+ },
45
+ "required": [
46
+ "text",
47
+ "type"
48
+ ],
49
+ "additionalProperties": false,
50
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
51
+ }
52
+ }
53
+ ],
54
+ "description": "The contents of the developer message."
55
+ },
56
+ "role": {
57
+ "type": "string",
58
+ "const": "developer",
59
+ "description": "The role of the messages author, in this case `developer`."
60
+ },
61
+ "name": {
62
+ "type": "string",
63
+ "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role."
64
+ }
65
+ },
66
+ "required": [
67
+ "content",
68
+ "role"
69
+ ],
70
+ "additionalProperties": false,
71
+ "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, `developer` messages replace the previous `system` messages."
72
+ },
73
+ {
74
+ "type": "object",
75
+ "properties": {
76
+ "content": {
77
+ "anyOf": [
78
+ {
79
+ "type": "string"
80
+ },
81
+ {
82
+ "type": "array",
83
+ "items": {
84
+ "type": "object",
85
+ "properties": {
86
+ "text": {
87
+ "type": "string",
88
+ "description": "The text content."
89
+ },
90
+ "type": {
91
+ "type": "string",
92
+ "const": "text",
93
+ "description": "The type of the content part."
94
+ }
95
+ },
96
+ "required": [
97
+ "text",
98
+ "type"
99
+ ],
100
+ "additionalProperties": false,
101
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
102
+ }
103
+ }
104
+ ],
105
+ "description": "The contents of the system message."
106
+ },
107
+ "role": {
108
+ "type": "string",
109
+ "const": "system",
110
+ "description": "The role of the messages author, in this case `system`."
111
+ },
112
+ "name": {
113
+ "type": "string",
114
+ "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role."
115
+ }
116
+ },
117
+ "required": [
118
+ "content",
119
+ "role"
120
+ ],
121
+ "additionalProperties": false,
122
+ "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, use `developer` messages for this purpose instead."
123
+ },
124
+ {
125
+ "type": "object",
126
+ "properties": {
127
+ "content": {
128
+ "anyOf": [
129
+ {
130
+ "type": "string"
131
+ },
132
+ {
133
+ "type": "array",
134
+ "items": {
135
+ "anyOf": [
136
+ {
137
+ "type": "object",
138
+ "properties": {
139
+ "text": {
140
+ "type": "string",
141
+ "description": "The text content."
142
+ },
143
+ "type": {
144
+ "type": "string",
145
+ "const": "text",
146
+ "description": "The type of the content part."
147
+ }
148
+ },
149
+ "required": [
150
+ "text",
151
+ "type"
152
+ ],
153
+ "additionalProperties": false,
154
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
155
+ },
156
+ {
157
+ "type": "object",
158
+ "properties": {
159
+ "image_url": {
160
+ "type": "object",
161
+ "properties": {
162
+ "url": {
163
+ "type": "string",
164
+ "description": "Either a URL of the image or the base64 encoded image data."
165
+ },
166
+ "detail": {
167
+ "type": "string",
168
+ "enum": [
169
+ "auto",
170
+ "low",
171
+ "high"
172
+ ],
173
+ "description": "Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding)."
174
+ }
175
+ },
176
+ "required": [
177
+ "url"
178
+ ],
179
+ "additionalProperties": false
180
+ },
181
+ "type": {
182
+ "type": "string",
183
+ "const": "image_url",
184
+ "description": "The type of the content part."
185
+ }
186
+ },
187
+ "required": [
188
+ "image_url",
189
+ "type"
190
+ ],
191
+ "additionalProperties": false,
192
+ "description": "Learn about [image inputs](https://platform.openai.com/docs/guides/vision)."
193
+ },
194
+ {
195
+ "type": "object",
196
+ "properties": {
197
+ "input_audio": {
198
+ "type": "object",
199
+ "properties": {
200
+ "data": {
201
+ "type": "string",
202
+ "description": "Base64 encoded audio data."
203
+ },
204
+ "format": {
205
+ "type": "string",
206
+ "enum": [
207
+ "wav",
208
+ "mp3"
209
+ ],
210
+ "description": "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\"."
211
+ }
212
+ },
213
+ "required": [
214
+ "data",
215
+ "format"
216
+ ],
217
+ "additionalProperties": false
218
+ },
219
+ "type": {
220
+ "type": "string",
221
+ "const": "input_audio",
222
+ "description": "The type of the content part. Always `input_audio`."
223
+ }
224
+ },
225
+ "required": [
226
+ "input_audio",
227
+ "type"
228
+ ],
229
+ "additionalProperties": false,
230
+ "description": "Learn about [audio inputs](https://platform.openai.com/docs/guides/audio)."
231
+ }
232
+ ],
233
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
234
+ }
235
+ }
236
+ ],
237
+ "description": "The contents of the user message."
238
+ },
239
+ "role": {
240
+ "type": "string",
241
+ "const": "user",
242
+ "description": "The role of the messages author, in this case `user`."
243
+ },
244
+ "name": {
245
+ "type": "string",
246
+ "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role."
247
+ }
248
+ },
249
+ "required": [
250
+ "content",
251
+ "role"
252
+ ],
253
+ "additionalProperties": false,
254
+ "description": "Messages sent by an end user, containing prompts or additional context information."
255
+ },
256
+ {
257
+ "type": "object",
258
+ "properties": {
259
+ "role": {
260
+ "type": "string",
261
+ "const": "assistant",
262
+ "description": "The role of the messages author, in this case `assistant`."
263
+ },
264
+ "audio": {
265
+ "anyOf": [
266
+ {
267
+ "type": "object",
268
+ "properties": {
269
+ "id": {
270
+ "type": "string",
271
+ "description": "Unique identifier for a previous audio response from the model."
272
+ }
273
+ },
274
+ "required": [
275
+ "id"
276
+ ],
277
+ "additionalProperties": false,
278
+ "description": "Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio)."
279
+ },
280
+ {
281
+ "type": "null"
282
+ }
283
+ ],
284
+ "description": "Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio)."
285
+ },
286
+ "content": {
287
+ "anyOf": [
288
+ {
289
+ "type": "string"
290
+ },
291
+ {
292
+ "type": "array",
293
+ "items": {
294
+ "anyOf": [
295
+ {
296
+ "type": "object",
297
+ "properties": {
298
+ "text": {
299
+ "type": "string",
300
+ "description": "The text content."
301
+ },
302
+ "type": {
303
+ "type": "string",
304
+ "const": "text",
305
+ "description": "The type of the content part."
306
+ }
307
+ },
308
+ "required": [
309
+ "text",
310
+ "type"
311
+ ],
312
+ "additionalProperties": false,
313
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
314
+ },
315
+ {
316
+ "type": "object",
317
+ "properties": {
318
+ "refusal": {
319
+ "type": "string",
320
+ "description": "The refusal message generated by the model."
321
+ },
322
+ "type": {
323
+ "type": "string",
324
+ "const": "refusal",
325
+ "description": "The type of the content part."
326
+ }
327
+ },
328
+ "required": [
329
+ "refusal",
330
+ "type"
331
+ ],
332
+ "additionalProperties": false
333
+ }
334
+ ]
335
+ }
336
+ },
337
+ {
338
+ "type": "null"
339
+ }
340
+ ],
341
+ "description": "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified."
342
+ },
343
+ "function_call": {
344
+ "anyOf": [
345
+ {
346
+ "type": "object",
347
+ "properties": {
348
+ "arguments": {
349
+ "type": "string",
350
+ "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."
351
+ },
352
+ "name": {
353
+ "type": "string",
354
+ "description": "The name of the function to call."
355
+ }
356
+ },
357
+ "required": [
358
+ "arguments",
359
+ "name"
360
+ ],
361
+ "additionalProperties": false,
362
+ "deprecated": "Deprecated and replaced by `tool_calls`. The name and arguments of a\nfunction that should be called, as generated by the model."
363
+ },
364
+ {
365
+ "type": "null"
366
+ }
367
+ ],
368
+ "deprecated": "Deprecated and replaced by `tool_calls`. The name and arguments of a\nfunction that should be called, as generated by the model."
369
+ },
370
+ "name": {
371
+ "type": "string",
372
+ "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role."
373
+ },
374
+ "refusal": {
375
+ "type": [
376
+ "string",
377
+ "null"
378
+ ],
379
+ "description": "The refusal message by the assistant."
380
+ },
381
+ "tool_calls": {
382
+ "type": "array",
383
+ "items": {
384
+ "type": "object",
385
+ "properties": {
386
+ "id": {
387
+ "type": "string",
388
+ "description": "The ID of the tool call."
389
+ },
390
+ "function": {
391
+ "type": "object",
392
+ "properties": {
393
+ "arguments": {
394
+ "type": "string",
395
+ "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function."
396
+ },
397
+ "name": {
398
+ "type": "string",
399
+ "description": "The name of the function to call."
400
+ }
401
+ },
402
+ "required": [
403
+ "arguments",
404
+ "name"
405
+ ],
406
+ "additionalProperties": false,
407
+ "description": "The function that the model called."
408
+ },
409
+ "type": {
410
+ "type": "string",
411
+ "const": "function",
412
+ "description": "The type of the tool. Currently, only `function` is supported."
413
+ }
414
+ },
415
+ "required": [
416
+ "id",
417
+ "function",
418
+ "type"
419
+ ],
420
+ "additionalProperties": false
421
+ },
422
+ "description": "The tool calls generated by the model, such as function calls."
423
+ }
424
+ },
425
+ "required": [
426
+ "role"
427
+ ],
428
+ "additionalProperties": false,
429
+ "description": "Messages sent by the model in response to user messages."
430
+ },
431
+ {
432
+ "type": "object",
433
+ "properties": {
434
+ "content": {
435
+ "anyOf": [
436
+ {
437
+ "type": "string"
438
+ },
439
+ {
440
+ "type": "array",
441
+ "items": {
442
+ "type": "object",
443
+ "properties": {
444
+ "text": {
445
+ "type": "string",
446
+ "description": "The text content."
447
+ },
448
+ "type": {
449
+ "type": "string",
450
+ "const": "text",
451
+ "description": "The type of the content part."
452
+ }
453
+ },
454
+ "required": [
455
+ "text",
456
+ "type"
457
+ ],
458
+ "additionalProperties": false,
459
+ "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)."
460
+ }
461
+ }
462
+ ],
463
+ "description": "The contents of the tool message."
464
+ },
465
+ "role": {
466
+ "type": "string",
467
+ "const": "tool",
468
+ "description": "The role of the messages author, in this case `tool`."
469
+ },
470
+ "tool_call_id": {
471
+ "type": "string",
472
+ "description": "Tool call that this message is responding to."
473
+ }
474
+ },
475
+ "required": [
476
+ "content",
477
+ "role",
478
+ "tool_call_id"
479
+ ],
480
+ "additionalProperties": false
481
+ },
482
+ {
483
+ "type": "object",
484
+ "properties": {
485
+ "content": {
486
+ "type": [
487
+ "string",
488
+ "null"
489
+ ],
490
+ "description": "The contents of the function message."
491
+ },
492
+ "name": {
493
+ "type": "string",
494
+ "description": "The name of the function to call."
495
+ },
496
+ "role": {
497
+ "type": "string",
498
+ "const": "function",
499
+ "description": "The role of the messages author, in this case `function`."
500
+ }
501
+ },
502
+ "required": [
503
+ "content",
504
+ "name",
505
+ "role"
506
+ ],
507
+ "additionalProperties": false,
508
+ "deprecated": true
509
+ }
510
+ ],
511
+ "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, `developer` messages replace the previous `system` messages."
512
+ },
513
+ "description": "ChatGPT messages to pass as a prefix to the prompt"
514
+ },
515
+ "__includeRawResponse": {
516
+ "type": "boolean",
517
+ "description": "Whether to include the raw OpenAI response in the output message's \"additional_kwargs\" field. Currently in experimental beta."
518
+ },
519
+ "supportsStrictToolCalling": {
520
+ "type": "boolean",
521
+ "description": "Whether the model supports the `strict` argument when passing in tools. If `undefined` the `strict` argument will not be passed to OpenAI."
522
+ },
523
+ "modalities": {
524
+ "type": "array",
525
+ "items": {
526
+ "type": "string",
527
+ "enum": [
528
+ "text",
529
+ "audio"
530
+ ]
531
+ },
532
+ "description": "Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`"
533
+ },
534
+ "audio": {
535
+ "type": "object",
536
+ "properties": {
537
+ "format": {
538
+ "type": "string",
539
+ "enum": [
540
+ "wav",
541
+ "mp3",
542
+ "flac",
543
+ "opus",
544
+ "pcm16"
545
+ ],
546
+ "description": "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`."
547
+ },
548
+ "voice": {
549
+ "type": "string",
550
+ "enum": [
551
+ "alloy",
552
+ "ash",
553
+ "ballad",
554
+ "coral",
555
+ "echo",
556
+ "sage",
557
+ "shimmer",
558
+ "verse"
559
+ ],
560
+ "description": "The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive)."
561
+ }
562
+ },
563
+ "required": [
564
+ "format",
565
+ "voice"
566
+ ],
567
+ "additionalProperties": false,
568
+ "description": "Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](https://platform.openai.com/docs/guides/audio)."
569
+ },
570
+ "reasoningEffort": {
571
+ "type": "string",
572
+ "enum": [
573
+ "low",
574
+ "medium",
575
+ "high"
576
+ ],
577
+ "description": "Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response."
578
+ },
579
+ "temperature": {
580
+ "type": "number",
581
+ "description": "Sampling temperature to use"
582
+ },
583
+ "maxTokens": {
584
+ "type": "number",
585
+ "description": "Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size."
586
+ },
587
+ "maxCompletionTokens": {
588
+ "type": "number",
589
+ "description": "Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size. Alias for `maxTokens` for reasoning models."
590
+ },
591
+ "topP": {
592
+ "type": "number",
593
+ "description": "Total probability mass of tokens to consider at each step"
594
+ },
595
+ "frequencyPenalty": {
596
+ "type": "number",
597
+ "description": "Penalizes repeated tokens according to frequency"
598
+ },
599
+ "presencePenalty": {
600
+ "type": "number",
601
+ "description": "Penalizes repeated tokens"
602
+ },
603
+ "n": {
604
+ "type": "number",
605
+ "description": "Number of completions to generate for each prompt"
606
+ },
607
+ "logitBias": {
608
+ "type": "object",
609
+ "additionalProperties": {
610
+ "type": "number"
611
+ },
612
+ "description": "Dictionary used to adjust the probability of specific tokens being generated"
613
+ },
614
+ "user": {
615
+ "type": "string",
616
+ "description": "Unique string identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
617
+ },
618
+ "streaming": {
619
+ "type": "boolean",
620
+ "description": "Whether to stream the results or not. Enabling disables tokenUsage reporting"
621
+ },
622
+ "streamUsage": {
623
+ "type": "boolean",
624
+ "description": "Whether or not to include token usage data in streamed chunks.",
625
+ "default": false
626
+ },
627
+ "modelName": {
628
+ "type": "string",
629
+ "description": "Model name to use. Alias for `model`.",
630
+ "deprecated": "Use \"model\" instead."
631
+ },
632
+ "model": {
633
+ "type": "string",
634
+ "description": "Model name to use"
635
+ },
636
+ "modelKwargs": {
637
+ "type": "object",
638
+ "description": "Holds any additional parameters that are valid to pass to `openai.createCompletion` (https://platform.openai.com/docs/api-reference/completions/create) that are not explicitly specified on this class."
639
+ },
640
+ "stop": {
641
+ "type": "array",
642
+ "items": {
643
+ "type": "string"
644
+ },
645
+ "description": "List of stop words to use when generating. Alias for `stopSequences`."
646
+ },
647
+ "stopSequences": {
648
+ "type": "array",
649
+ "items": {
650
+ "type": "string"
651
+ },
652
+ "description": "List of stop words to use when generating"
653
+ },
654
+ "timeout": {
655
+ "type": "number",
656
+ "description": "Timeout to use when making requests to OpenAI."
657
+ },
658
+ "openAIApiKey": {
659
+ "type": "string",
660
+ "description": "API key to use when making requests to OpenAI. Defaults to the value of `OPENAI_API_KEY` environment variable. Alias for `apiKey`"
661
+ },
662
+ "apiKey": {
663
+ "type": "string",
664
+ "description": "API key to use when making requests to OpenAI. Defaults to the value of `OPENAI_API_KEY` environment variable."
665
+ }
666
+ },
667
+ "definitions": {}
668
+ }