llama_cpp 0.3.5 → 0.3.6

@@ -0,0 +1,541 @@
#include "ggml-alloc.h"
#include "ggml.h"
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define UNUSED(x) (void)(x)
#define MAX(a, b) ((a) > (b) ? (a) : (b))

//#define GGML_ALLOCATOR_DEBUG

//#define AT_PRINTF printf
#define AT_PRINTF(...) ((void)0)

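// The graph allocator keeps one hash_node per tensor it has seen: n_children
// counts how many graph nodes still read the tensor, and n_views counts live
// views of it. Once both reach zero during graph allocation, the tensor's
// memory can be recycled.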
struct hash_node {
    struct ggml_tensor * t;
    int n_children;
    int n_views;
};

static size_t hash(void * p) {
    return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
}

static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) {
    size_t h = hash(t);

    // linear probing
    size_t i = h;
    while (hash_table[i].t != NULL) {
        if (hash_table[i].t == t) {
            return &hash_table[i];
        }
        i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
        if (i == h) {
            // hash table is full
            GGML_ASSERT(false);
        }
    }

    hash_table[i].t = t;
    return &hash_table[i];
}

// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
    assert(alignment && !(alignment & (alignment - 1))); // power of 2
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}
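// Worked example (illustrative values): with buffer == NULL, offset == 100 and
// alignment == 32, the remainder 100 % 32 == 4, so align == 28 and the function
// returns 128, the next 32-byte-aligned offset.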

struct free_block {
    void * addr;
    size_t size;
};

#define MAX_FREE_BLOCKS 128

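// The allocator manages a single pre-allocated buffer [data, data + size).
// Free space is tracked as an address-sorted array of at most MAX_FREE_BLOCKS
// blocks, max_size records the high-water mark of the buffer, and hash_table
// holds the per-tensor reference counts used by the graph allocator. When
// measure is true the buffer is fictitious and only max_size is meaningful.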
struct ggml_allocr {
    void * data;
    size_t size;
    size_t alignment;
    int n_free_blocks;
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
    size_t max_size;
    bool measure;

#ifdef GGML_ALLOCATOR_DEBUG
    struct ggml_tensor * allocated_tensors[1024];
#endif
};

#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == NULL) {
            alloc->allocated_tensors[i] = tensor;
            return;
        }
    }
    GGML_ASSERT(!"out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    for (int i = 0; i < 1024; i++) {
        if (alloc->allocated_tensors[i] == tensor ||
            (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
            alloc->allocated_tensors[i] = NULL;
            return;
        }
    }
    printf("tried to free tensor %s not found\n", tensor->name);
    GGML_ASSERT(!"tensor not found");
}
#endif


static size_t ggml_allocator_get_alloc_size(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    return ggml_nbytes(tensor);

    UNUSED(alloc);
}

void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);

    AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);

    size_t max_avail = 0;

    // find the best fitting free block
    int best_fit_block = -1;
    size_t best_fit_size = SIZE_MAX;
    for (int i = 0; i < alloc->n_free_blocks; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        max_avail = MAX(max_avail, block->size);
        if (block->size >= size && block->size <= best_fit_size) {
            best_fit_block = i;
            best_fit_size = block->size;
        }
    }

    AT_PRINTF("block %d\n", best_fit_block);

    if (best_fit_block == -1) {
        fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
                __func__, size, max_avail);
        GGML_ASSERT(!"not enough space in the buffer");
        return;
    }
    struct free_block * block = &alloc->free_blocks[best_fit_block];
    void * addr = block->addr;
    block->addr = (char*)block->addr + size;
    block->size -= size;
    if (block->size == 0) {
        // remove block if empty
        alloc->n_free_blocks--;
        for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
            alloc->free_blocks[j] = alloc->free_blocks[j+1];
        }
    }

    tensor->data = addr;

#ifdef GGML_ALLOCATOR_DEBUG
    add_allocated_tensor(alloc, tensor);
    size_t cur_max = (char*)addr - (char*)alloc->data + size;
    if (cur_max > alloc->max_size) {
        printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
        for (int i = 0; i < 1024; i++) {
            if (alloc->allocated_tensors[i]) {
                printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
            }
        }
        printf("\n");
    }
#endif

    alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
}
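// Worked example (illustrative sizes): with free blocks of 512, 160 and 96
// bytes and a request for 128 bytes, the 160-byte block is the smallest block
// that fits and is chosen; 128 bytes are carved from its start and a 32-byte
// block remains on the free list.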

// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
    void * ptr = tensor->data;

    if (ptr < alloc->data || (char*)ptr >= (char*)alloc->data + alloc->max_size) {
        // the tensor was not allocated in this buffer
        // this can happen because the graph allocator will try to free weights and other tensors from different buffers
        // the easiest way to deal with this is just to ignore it
        return;
    }

    size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
    size = aligned_offset(NULL, size, alloc->alignment);
    AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);

#ifdef GGML_ALLOCATOR_DEBUG
    remove_allocated_tensor(alloc, tensor);
#endif

    // see if we can merge with an existing block
    for (int i = 0; i < alloc->n_free_blocks; i++) {
        struct free_block * block = &alloc->free_blocks[i];
        // check if ptr is at the end of the block
        if ((char*)block->addr + block->size == ptr) {
            block->size += size;
            // check if we can merge with the next block
            if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
                block->size += alloc->free_blocks[i+1].size;
                alloc->n_free_blocks--;
                for (int j = i+1; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
        // check if ptr is at the beginning of the block
        if ((char*)ptr + size == block->addr) {
            block->addr = ptr;
            block->size += size;
            // check if we can merge with the previous block
            if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
                alloc->free_blocks[i-1].size += block->size;
                alloc->n_free_blocks--;
                for (int j = i; j < alloc->n_free_blocks; j++) {
                    alloc->free_blocks[j] = alloc->free_blocks[j+1];
                }
            }
            return;
        }
    }
    // otherwise, add a new block
    GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
        alloc->free_blocks[i] = alloc->free_blocks[i-1];
    }
    // insert the new block
    alloc->free_blocks[insert_pos].addr = ptr;
    alloc->free_blocks[insert_pos].size = size;
    alloc->n_free_blocks++;
}
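// The three cases above (append to an existing block, prepend to one, or
// insert a new block in address order) keep the free list sorted and
// coalesced, which is why the linear scans stay cheap in practice.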

void ggml_allocr_reset(struct ggml_allocr * alloc) {
    alloc->n_free_blocks = 1;
    size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
    alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
    alloc->free_blocks[0].size = alloc->size - align_offset;
}

struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) {
    struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);

    *alloc = (struct ggml_allocr){
        /*.data          = */ data,
        /*.size          = */ size,
        /*.alignment     = */ alignment,
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.hash_table    = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_allocr_reset(alloc);

    return alloc;
}

// address and size of the buffer when measuring
// it needs to be large enough to fit all the tensors, but it cannot overlap with other existing buffers
static void * const MEASURE_BASE_ADDR = (void *) 0x1000;
static const size_t MEASURE_MAX_SIZE = 1ULL<<40; // 1 TB

struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
    struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);

    *alloc = (struct ggml_allocr){
        /*.data          = */ MEASURE_BASE_ADDR,
        /*.size          = */ MEASURE_MAX_SIZE,
        /*.alignment     = */ alignment,
        /*.n_free_blocks = */ 0,
        /*.free_blocks   = */ {{0}},
        /*.hash_table    = */ {{0}},
        /*.max_size      = */ 0,
        /*.measure       = */ true,
#ifdef GGML_ALLOCATOR_DEBUG
        /*.allocated_tensors = */ {0},
#endif
    };

    ggml_allocr_reset(alloc);

    return alloc;
}
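// In measure mode the "buffer" starting at MEASURE_BASE_ADDR is never backed
// by real memory: tensors receive placeholder addresses that must not be
// dereferenced, and the allocator is only used to compute the size that a
// real buffer will need (the value returned by ggml_allocr_alloc_graph).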

void ggml_allocr_free(struct ggml_allocr * alloc) {
    free(alloc);
}

bool ggml_allocr_is_measure(struct ggml_allocr * alloc) {
    return alloc->measure;
}

//////////// compute graph allocator

static bool ggml_is_view(struct ggml_tensor * t) {
    return t->op == GGML_OP_RESHAPE || t->op == GGML_OP_VIEW || t->op == GGML_OP_TRANSPOSE ||
           t->op == GGML_OP_PERMUTE || t->op == GGML_OP_CPY;
}

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

static struct ggml_tensor * get_view_parent(struct ggml_tensor * t) {
    switch (t->op) {
        case GGML_OP_PERMUTE:
        case GGML_OP_RESHAPE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_VIEW:
            return t->src[0];
        case GGML_OP_CPY:
            return t->src[1];
        default:
            return NULL;
    }
}

static struct ggml_tensor * get_view_source(struct ggml_tensor * t) {
    struct ggml_tensor * parent = t;
    do {
        parent = get_view_parent(parent);
    } while (ggml_is_view(parent));
    return parent;
}

static bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_ADD:
        case GGML_OP_ADD1:
        case GGML_OP_ACC:
        case GGML_OP_SUB:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SQR:
        case GGML_OP_SQRT:
        case GGML_OP_LOG:
        case GGML_OP_UNARY:
        case GGML_OP_ROPE:
        case GGML_OP_RMS_NORM:
        case GGML_OP_SET:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_CONT:
            return true;

        default:
            return false;
    }
}
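// An op listed above can write its result directly over one of its inputs,
// provided that input has the same layout and no other consumers;
// allocate_node below uses this to reuse a parent's buffer instead of
// allocating a new one.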

static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) {
    struct hash_node * ht = alloc->hash_table;
    if (node->data == NULL) {
        if (ggml_is_view(node)) {
            size_t offset;
            switch(node->op) {
                case GGML_OP_VIEW:
                    memcpy(&offset, node->op_params, sizeof(size_t));
                    node->data = (char *) node->src[0]->data + offset;
                    break;
                case GGML_OP_PERMUTE:
                case GGML_OP_RESHAPE:
                case GGML_OP_TRANSPOSE:
                    node->data = node->src[0]->data;
                    break;
                case GGML_OP_CPY:
                    node->data = node->src[1]->data;
                    break;
                default:
                    GGML_ASSERT(!"unknown view op");
                    break;
            }
        } else {
            // see if we can reuse a parent's buffer (inplace)
            if (ggml_op_can_inplace(node->op)) {
                for (int i = 0; i < GGML_MAX_SRC; i++) {
                    struct ggml_tensor * parent = node->src[i];
                    if (parent == NULL) {
                        break;
                    }
                    struct hash_node * p_hn = hash_get(ht, parent);
                    if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
                        if (ggml_is_view(parent)) {
                            struct ggml_tensor * view_src = get_view_source(parent);
                            struct hash_node * view_src_hn = hash_get(ht, view_src);
                            if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
                                // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
                                // the parent's data that it will need later (same layout requirement). the problem is that then
                                // we cannot free the tensor because the original address of the allocation is lost.
                                // adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
                                // for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
                                AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
                                node->data = parent->data;
                                return;
                            }
                        }
                        else {
                            AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                            node->data = parent->data;
                        }
                        return;
                    }
                }
            }
            ggml_allocr_alloc(alloc, node);
        }
    }
}
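// allocate_node resolves a tensor's address in this order: views alias their
// source tensor's data, in-place-capable ops try to take over a parent that
// has exactly one remaining child and no views, and everything else gets a
// fresh block from ggml_allocr_alloc.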

static size_t ggml_allocator_alloc_graph_tensors_n(
    struct ggml_allocr * alloc,
    struct ggml_cgraph ** graphs, int n_graphs,
    struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) {

    // reset hash table
    struct hash_node * ht = alloc->hash_table;
    memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE);

    // count number of children and views
    for (int g = 0; g < n_graphs; g++) {
        struct ggml_cgraph * gf = graphs[g];
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            if (ggml_is_view(node)) {
                struct ggml_tensor * view_src = get_view_source(node);
                hash_get(ht, view_src)->n_views += 1;
            }

            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                hash_get(ht, parent)->n_children += 1;
            }
        }
    }

    // allocate tensors
    for (int g = 0; g < n_graphs; g++) {
        struct ggml_cgraph * gf = graphs[g];
        AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
        // graph inputs are allocated first to ensure that they are not overwritten by each other
        if (inputs != NULL && inputs[g] != NULL) {
            for (int i = 0; inputs[g][i] != NULL; i++) {
                struct ggml_tensor * input = inputs[g][i];
                AT_PRINTF("input: %s\n", input->name);
                allocate_node(alloc, input);
            }
        }
        for (int i = 0; i < gf->n_nodes; i++) {
            struct ggml_tensor * node = gf->nodes[i];

            // allocate parents (leafs)
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                allocate_node(alloc, parent);
            }

            // allocate node
            allocate_node(alloc, node);

            AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                AT_PRINTF("%s", parent->name);
                if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
                    AT_PRINTF(", ");
                }
            }
            AT_PRINTF("\n");

            // update parents
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * parent = node->src[j];
                if (parent == NULL) {
                    break;
                }
                struct hash_node * p_hn = hash_get(ht, parent);
                p_hn->n_children -= 1;

                //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);

                if (p_hn->n_children == 0 && p_hn->n_views == 0) {
                    if (ggml_is_view(parent)) {
                        struct ggml_tensor * view_src = get_view_source(parent);
                        struct hash_node * view_src_hn = hash_get(ht, view_src);
                        view_src_hn->n_views -= 1;
                        AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
                        if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
                            ggml_allocator_free_tensor(alloc, view_src);
                        }
                    }
                    else {
                        if (parent->data != node->data) {
                            ggml_allocator_free_tensor(alloc, parent);
                        }
                    }
                }
            }
            AT_PRINTF("\n");
        }
        // free graph outputs here that wouldn't be freed otherwise because they have no children
        if (outputs != NULL && outputs[g] != NULL) {
            for (int i = 0; outputs[g][i] != NULL; i++) {
                struct ggml_tensor * output = outputs[g][i];
                AT_PRINTF("output: %s\n", output->name);
                ggml_allocator_free_tensor(alloc, output);
            }
        }
    }

    return alloc->max_size;
}
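// In short: a first pass counts how many children and views each tensor has,
// and a second pass walks the nodes in execution order, allocating inputs,
// parents and the node itself, then decrementing the parents' counters and
// returning their memory to the free list once nothing later in the graph
// needs them. The returned value is the high-water mark of the buffer.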

size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) {
    return ggml_allocator_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
}
@@ -0,0 +1,22 @@
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif


GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);

GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);
GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);


#ifdef __cplusplus
}
#endif
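The sketch below shows the two-pass workflow this API is designed for: a measure allocator sizes the compute buffer for a graph, and a real allocator then places the same graph into an actual buffer. It is a minimal, illustrative example and not part of the gem; the graph built in build_graph, the 16 MB metadata context, the 32-byte alignment and the elided compute/data-filling steps are placeholders.

// Hypothetical usage sketch for ggml_allocr (not part of these files).
#include "ggml.h"
#include "ggml-alloc.h"
#include <stdlib.h>

static const size_t tensor_alignment = 32;

// Build the same graph for both passes. The ggml context uses no_alloc, so
// tensor data pointers are assigned by ggml_allocr rather than by the context.
static struct ggml_cgraph build_graph(struct ggml_context * ctx, struct ggml_allocr * allocr) {
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    ggml_allocr_alloc(allocr, a);
    ggml_allocr_alloc(allocr, b);
    if (!ggml_allocr_is_measure(allocr)) {
        // fill a->data and b->data here; measure-pass addresses are fake
    }
    struct ggml_tensor * c = ggml_mul(ctx, a, b);
    return ggml_build_forward(c);
}

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // metadata only; tensor data lives in buf
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    // measure pass: compute how much memory the graph needs
    struct ggml_context * ctx0 = ggml_init(params);
    struct ggml_allocr * allocr = ggml_allocr_new_measure(tensor_alignment);
    struct ggml_cgraph gf_measure = build_graph(ctx0, allocr);
    size_t mem_size = ggml_allocr_alloc_graph(allocr, &gf_measure) + tensor_alignment;
    ggml_allocr_free(allocr);
    ggml_free(ctx0);

    // real pass: allocate a buffer of that size and place the graph into it
    void * buf = malloc(mem_size);
    struct ggml_context * ctx1 = ggml_init(params);
    allocr = ggml_allocr_new(buf, mem_size, tensor_alignment);
    struct ggml_cgraph gf = build_graph(ctx1, allocr);
    ggml_allocr_alloc_graph(allocr, &gf);

    // run the graph with ggml's compute API, then clean up

    ggml_allocr_free(allocr);
    ggml_free(ctx1);
    free(buf);
    return 0;
}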