sedna 0.5.1 → 0.6.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (128) hide show
  1. data/{CHANGES → CHANGES.rdoc} +9 -0
  2. data/{README → README.rdoc} +23 -25
  3. data/Rakefile +32 -9
  4. data/ext/{extconf.rb → sedna/extconf.rb} +33 -21
  5. data/ext/{sedna.c → sedna/sedna.c} +48 -40
  6. data/test/sedna_test.rb +9 -9
  7. data/vendor/sedna/AUTHORS +18 -0
  8. data/vendor/sedna/COPYRIGHT +90 -0
  9. data/vendor/sedna/LICENSE +202 -0
  10. data/vendor/sedna/Makefile.include +423 -0
  11. data/vendor/sedna/Makefile.platform +31 -0
  12. data/vendor/sedna/depend.sed +48 -0
  13. data/vendor/sedna/driver/c/Makefile +98 -0
  14. data/vendor/sedna/driver/c/libsedna.c +1998 -0
  15. data/vendor/sedna/driver/c/libsedna.h +199 -0
  16. data/vendor/sedna/driver/c/sednamt.def +21 -0
  17. data/vendor/sedna/driver/c/sp_defs.h +186 -0
  18. data/vendor/sedna/kernel/common/FastXptrHash.cpp +101 -0
  19. data/vendor/sedna/kernel/common/IntHash.h +314 -0
  20. data/vendor/sedna/kernel/common/IntList.h +224 -0
  21. data/vendor/sedna/kernel/common/Makefile +30 -0
  22. data/vendor/sedna/kernel/common/SSMMsg.cpp +459 -0
  23. data/vendor/sedna/kernel/common/SSMMsg.h +142 -0
  24. data/vendor/sedna/kernel/common/XptrHash.h +435 -0
  25. data/vendor/sedna/kernel/common/argtable.c +972 -0
  26. data/vendor/sedna/kernel/common/argtable.h +896 -0
  27. data/vendor/sedna/kernel/common/base.cpp +339 -0
  28. data/vendor/sedna/kernel/common/base.h +226 -0
  29. data/vendor/sedna/kernel/common/bit_set.cpp +157 -0
  30. data/vendor/sedna/kernel/common/bit_set.h +55 -0
  31. data/vendor/sedna/kernel/common/commutil.h +67 -0
  32. data/vendor/sedna/kernel/common/config.h +62 -0
  33. data/vendor/sedna/kernel/common/counted_ptr.h +74 -0
  34. data/vendor/sedna/kernel/common/errdbg/ErrorCodes.java +1056 -0
  35. data/vendor/sedna/kernel/common/errdbg/Makefile +34 -0
  36. data/vendor/sedna/kernel/common/errdbg/assert.c +133 -0
  37. data/vendor/sedna/kernel/common/errdbg/d_printf.c +150 -0
  38. data/vendor/sedna/kernel/common/errdbg/d_printf.h +91 -0
  39. data/vendor/sedna/kernel/common/errdbg/error.codes +1743 -0
  40. data/vendor/sedna/kernel/common/errdbg/error_codes.c +531 -0
  41. data/vendor/sedna/kernel/common/errdbg/error_codes.h +549 -0
  42. data/vendor/sedna/kernel/common/errdbg/error_codes_scm.scm +527 -0
  43. data/vendor/sedna/kernel/common/errdbg/event_log.c +956 -0
  44. data/vendor/sedna/kernel/common/errdbg/event_log.h +226 -0
  45. data/vendor/sedna/kernel/common/errdbg/exceptions.cpp +155 -0
  46. data/vendor/sedna/kernel/common/errdbg/exceptions.h +559 -0
  47. data/vendor/sedna/kernel/common/errdbg/gen_error_codes +0 -0
  48. data/vendor/sedna/kernel/common/errdbg/gen_error_codes.c +345 -0
  49. data/vendor/sedna/kernel/common/gmm.cpp +192 -0
  50. data/vendor/sedna/kernel/common/gmm.h +29 -0
  51. data/vendor/sedna/kernel/common/ipc_ops.cpp +435 -0
  52. data/vendor/sedna/kernel/common/ipc_ops.h +51 -0
  53. data/vendor/sedna/kernel/common/lfsGlobals.h +12 -0
  54. data/vendor/sedna/kernel/common/lm_base.h +90 -0
  55. data/vendor/sedna/kernel/common/mmgr/Makefile +11 -0
  56. data/vendor/sedna/kernel/common/mmgr/aset.c +1185 -0
  57. data/vendor/sedna/kernel/common/mmgr/mcxt.c +741 -0
  58. data/vendor/sedna/kernel/common/mmgr/memnodes.h +70 -0
  59. data/vendor/sedna/kernel/common/mmgr/memutils.h +145 -0
  60. data/vendor/sedna/kernel/common/mmgr/se_alloc.h +321 -0
  61. data/vendor/sedna/kernel/common/mmgr/track.c +214 -0
  62. data/vendor/sedna/kernel/common/pping.cpp +672 -0
  63. data/vendor/sedna/kernel/common/pping.h +119 -0
  64. data/vendor/sedna/kernel/common/rcv_test.cpp +273 -0
  65. data/vendor/sedna/kernel/common/rcv_test.h +19 -0
  66. data/vendor/sedna/kernel/common/sedna.c +128 -0
  67. data/vendor/sedna/kernel/common/sedna.h +49 -0
  68. data/vendor/sedna/kernel/common/sedna_ef.h +52 -0
  69. data/vendor/sedna/kernel/common/sm_vmm_data.h +144 -0
  70. data/vendor/sedna/kernel/common/sp.c +93 -0
  71. data/vendor/sedna/kernel/common/sp.h +36 -0
  72. data/vendor/sedna/kernel/common/st/Makefile +20 -0
  73. data/vendor/sedna/kernel/common/st/os_linux/stacktrace.c +213 -0
  74. data/vendor/sedna/kernel/common/st/os_nt/stacktrace.c +338 -0
  75. data/vendor/sedna/kernel/common/st/os_other/stacktrace.c +39 -0
  76. data/vendor/sedna/kernel/common/st/stacktrace.h +72 -0
  77. data/vendor/sedna/kernel/common/st/stacktrfmt.c +64 -0
  78. data/vendor/sedna/kernel/common/tr_debug.cpp +112 -0
  79. data/vendor/sedna/kernel/common/tr_debug.h +22 -0
  80. data/vendor/sedna/kernel/common/u/Makefile +14 -0
  81. data/vendor/sedna/kernel/common/u/u.c +268 -0
  82. data/vendor/sedna/kernel/common/u/u.h +715 -0
  83. data/vendor/sedna/kernel/common/u/uatomic.h +12 -0
  84. data/vendor/sedna/kernel/common/u/udl.h +31 -0
  85. data/vendor/sedna/kernel/common/u/uevent.c +406 -0
  86. data/vendor/sedna/kernel/common/u/uevent.h +71 -0
  87. data/vendor/sedna/kernel/common/u/ugnames.cpp +330 -0
  88. data/vendor/sedna/kernel/common/u/ugnames.h +134 -0
  89. data/vendor/sedna/kernel/common/u/uhash_map.h +77 -0
  90. data/vendor/sedna/kernel/common/u/uhdd.c +1018 -0
  91. data/vendor/sedna/kernel/common/u/uhdd.h +206 -0
  92. data/vendor/sedna/kernel/common/u/ummap.cpp +268 -0
  93. data/vendor/sedna/kernel/common/u/ummap.h +60 -0
  94. data/vendor/sedna/kernel/common/u/umutex.c +145 -0
  95. data/vendor/sedna/kernel/common/u/umutex.h +65 -0
  96. data/vendor/sedna/kernel/common/u/upipe.cpp +244 -0
  97. data/vendor/sedna/kernel/common/u/upipe.h +74 -0
  98. data/vendor/sedna/kernel/common/u/uprocess.c +767 -0
  99. data/vendor/sedna/kernel/common/u/uprocess.h +91 -0
  100. data/vendor/sedna/kernel/common/u/usafesync.h +41 -0
  101. data/vendor/sedna/kernel/common/u/usecurity.c +150 -0
  102. data/vendor/sedna/kernel/common/u/usecurity.h +55 -0
  103. data/vendor/sedna/kernel/common/u/usem.c +891 -0
  104. data/vendor/sedna/kernel/common/u/usem.h +83 -0
  105. data/vendor/sedna/kernel/common/u/ushm.c +222 -0
  106. data/vendor/sedna/kernel/common/u/ushm.h +46 -0
  107. data/vendor/sedna/kernel/common/u/usocket.c +541 -0
  108. data/vendor/sedna/kernel/common/u/usocket.h +118 -0
  109. data/vendor/sedna/kernel/common/u/usystem.c +57 -0
  110. data/vendor/sedna/kernel/common/u/usystem.h +46 -0
  111. data/vendor/sedna/kernel/common/u/uthread.c +259 -0
  112. data/vendor/sedna/kernel/common/u/uthread.h +95 -0
  113. data/vendor/sedna/kernel/common/u/utime.c +65 -0
  114. data/vendor/sedna/kernel/common/u/utime.h +40 -0
  115. data/vendor/sedna/kernel/common/u/uutils.c +142 -0
  116. data/vendor/sedna/kernel/common/u/uutils.h +65 -0
  117. data/vendor/sedna/kernel/common/ugc.cpp +156 -0
  118. data/vendor/sedna/kernel/common/ugc.h +15 -0
  119. data/vendor/sedna/kernel/common/utils.cpp +156 -0
  120. data/vendor/sedna/kernel/common/utils.h +133 -0
  121. data/vendor/sedna/kernel/common/version.c +16 -0
  122. data/vendor/sedna/kernel/common/version.h +21 -0
  123. data/vendor/sedna/kernel/common/wustructures.h +18 -0
  124. data/vendor/sedna/kernel/common/wutypes.h +34 -0
  125. data/vendor/sedna/kernel/common/xptr.cpp +17 -0
  126. data/vendor/sedna/kernel/common/xptr.h +211 -0
  127. data/vendor/sedna/ver +1 -0
  128. metadata +142 -14
@@ -0,0 +1,51 @@
1
/*
 * File: ipc_ops.h
 * Copyright (C) 2004 The Institute for System Programming of the Russian Academy of Sciences (ISP RAS)
 */

#ifndef _IPC_OPS_H
#define _IPC_OPS_H

#include "common/sedna.h"

#include "common/u/ushm.h"
#include "common/config.h"

/* Attaches the governor's shared-memory segment.
 * NOTE(review): presumably populates the global sedna_gov_shm_ptr referenced
 * by the macros below — confirm against the .cpp implementation. */
void
open_gov_shm ();

/* Detaches the governor's shared-memory segment; returns a status code. */
int
close_gov_shm ();

/* Sends command code `cmd` to the governor listening on `port_number`. */
void
send_command_to_gov(int port_number, int cmd);

/* Looks up a database id by name in the governor configuration.
 * NOTE(review): likely returns a negative value when the name is not
 * found — verify against callers. */
int
get_db_id_by_name(gov_config_struct* cfg, const char* db_name);

/* Returns the id of the first unused database cell in the configuration. */
int
get_next_free_db_id(gov_config_struct* cfg);

/* Clears the shared-memory cell describing database `db_id`. */
void
erase_database_cell_in_gov_shm(int db_id, gov_config_struct* cfg);

/* Fills the shared-memory cell for database `db_id` with the given
 * settings: buffer count, transaction limit, update criterion,
 * log-file limit, and initial size of the temporary file. */
void
fill_database_cell_in_gov_shm(gov_config_struct* cfg,
                              int db_id,
                              const char* db_name,
                              int bufs_num,
                              int max_trs_num,
                              double upd_crt,
                              int max_log_files,
                              int tmp_file_initial_size);

/* Reads configuration values from sednaconf into `cfg`. */
void
get_sednaconf_values(gov_header_struct* cfg);


/* Typed pointers to the sedna_gov_shm_ptr */
#define GOV_HEADER_GLOBAL_PTR ( GOV_HEADER_STRUCT_PTR(sedna_gov_shm_ptr) )
#define GOV_CONFIG_GLOBAL_PTR ( GOV_CONFIG_STRUCT_PTR(sedna_gov_shm_ptr) )

#endif /* _IPC_OPS_H */
@@ -0,0 +1,12 @@
1
#ifndef _LFS_GLOBALS_
#define _LFS_GLOBALS_

#include <stdint.h>
#include <stddef.h>

/* 64-bit position within the log.
 * NOTE(review): LSN presumably stands for Log Sequence Number — confirm. */
typedef uint64_t LSN;

/* Sentinel values: all-ones marks an invalid LSN / invalid log file id. */
#define LFS_INVALID_LSN UINT64_C(0xFFFFFFFFFFFFFFFF)
#define LFS_INVALID_FILE UINT64_C(0xFFFFFFFFFFFFFFFF)

#endif
@@ -0,0 +1,90 @@
1
/*
 * File: lm_base.h
 * Copyright (C) 2004 The Institute for System Programming of the Russian Academy of Sciences (ISP RAS)
 *
 * Basic definitions for the lock manager: resource kinds, lock modes,
 * reply/status codes, and the resource_id value class used as a lock key.
 */

#ifndef _LM_BASE_H
#define _LM_BASE_H

#include <string>
#include "common/sedna.h"
#include "common/base.h"

/*
#ifdef _WIN32
#include <hash_map>
#else
#include <ext/hash_map>
#endif
*/

#define LOCK_MGR_ON

/* Kind of resource a lock can be taken on. */
enum resource_kind {LM_DOCUMENT, LM_COLLECTION, LM_INDEX, LM_TRIGGER, LM_DATABASE};

/* Classic multi-granularity lock modes. */
enum lock_mode
{
    NULL_LOCK,
    lm_s,   // Shared
    lm_x,   // eXclusive
    lm_is,  // intention shared
    lm_ix,  // intention exclusive
    lm_six  // shared and intention exclusive
};

//enum lock_reply {LOCK_GRANTED, LOCK_WAIT, LOCK_ERROR};

/* Result reported back to a lock requester. */
enum lock_reply {LOCK_OK, LOCK_TIMEOUT, LOCK_DEADLOCK, LOCK_NOT_LOCKED };

/* State of a lock request inside the manager's queues. */
enum lock_status {LOCK_GRANTED, LOCK_CONVERTING, LOCK_WAITING, LOCK_DENIED, NONE_STATUS};

/* Expected hold duration of a lock. */
enum lock_class {LOCK_INSTANT, LOCK_SHORT, LOCK_MEDIUM, LOCK_LONG, LOCK_VERY_LONG};

/* Value class identifying a lockable resource: a (name, kind) pair. */
class resource_id
{
private:
    std::string res_name;
    resource_kind kind; //document or collection
public:
    friend void print_resource_id(std::string);
    resource_id(std::string r_n, resource_kind r_k): res_name(r_n), kind(r_k) {};
    resource_id(const resource_id& r_id): res_name(r_id.res_name), kind(r_id.kind) {};
    resource_id(){};
    std::string get_res_name() { return res_name;};
    resource_kind get_resource_kind() {return kind;};
    /* Returns a string key unique across resource kinds by prefixing the
       name with a kind tag ("_doc_", "_col_", "_ind_", "_trg_", "_dtb_").
       Throws USER_EXCEPTION(SE4700) for an unknown kind. */
    std::string get_str_res_id()
    {
        std::string hash_r_id;

        switch (kind)
        {
            case LM_DOCUMENT: hash_r_id = std::string("_doc_") + res_name;
                              break;

            case LM_COLLECTION: hash_r_id = std::string("_col_") + res_name;
                              break;

            case LM_INDEX: hash_r_id = std::string("_ind_") + res_name;
                              break;

            case LM_TRIGGER: hash_r_id = std::string("_trg_") + res_name;
                              break;

            case LM_DATABASE: hash_r_id = std::string("_dtb_") + res_name;
                              break;

            default: throw USER_EXCEPTION(SE4700);
        }

        return hash_r_id;

    };
};



/* Commands understood by the lock manager. */
enum lm_commands {LM_LOCK, LM_RELEASE};

#define MAX_SEM_NAME_LENGTH 100

#endif
@@ -0,0 +1,11 @@
1
#
# Makefile for mmgr (GNU make)
#

# Relative path from this directory up to the project root.
PP = ../../..

include $(PP)/Makefile.include

# Objects bundled into the mmgr pseudo-library.
OBJS = aset$(OBJ_EXT) mcxt$(OBJ_EXT) track$(OBJ_EXT)

include $(PP)/Makefile.pseudolib
@@ -0,0 +1,1185 @@
1
+ /*
2
+ * File: aset.c
3
+ * Allocation set definitions.
4
+ *
5
+ * Portions Copyright (C) 2006 The Institute for System Programming of the Russian Academy of Sciences (ISP RAS)
6
+ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
7
+ * Portions Copyright (c) 1994, Regents of the University of California
8
+ *
9
+ * AllocSet is our standard implementation of the abstract MemoryContext
10
+ * type.
11
+ *
12
+ *
13
+ * NOTE:
14
+ * This is a new (Feb. 05, 1999) implementation of the allocation set
15
+ * routines. AllocSet...() does not use OrderedSet...() any more.
16
+ * Instead it manages allocations in a block pool by itself, combining
17
+ * many small allocations in a few bigger blocks. AllocSetFree() normally
18
+ * doesn't free() memory really. It just adds the free'd area to some
19
+ * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
20
+ * at once on AllocSetReset(), which happens when the memory context gets
21
+ * destroyed.
22
+ * Jan Wieck
23
+ *
24
+ * Performance improvement from Tom Lane, 8/99: for extremely large request
25
+ * sizes, we do want to be able to give the memory back to free() as soon
26
+ * as it is se_free()'d. Otherwise we risk tying up a lot of memory in
27
+ * freelist entries that might never be usable. This is specially needed
28
+ * when the caller is repeatedly se_realloc()'ing a block bigger and bigger;
29
+ * the previous instances of the block were guaranteed to be wasted until
30
+ * AllocSetReset() under the old way.
31
+ *
32
+ * Further improvement 12/00: as the code stood, request sizes in the
33
+ * midrange between "small" and "large" were handled very inefficiently,
34
+ * because any sufficiently large free chunk would be used to satisfy a
35
+ * request, even if it was much larger than necessary. This led to more
36
+ * and more wasted space in allocated chunks over time. To fix, get rid
37
+ * of the midrange behavior: we now handle only "small" power-of-2-size
38
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
39
+ * the number of freelists to change the small/large boundary.
40
+ *
41
+ *
42
+ * About CLOBBER_FREED_MEMORY:
43
+ *
44
+ * If this symbol is defined, all freed memory is overwritten with 0x7F's.
45
+ * This is useful for catching places that reference already-freed memory.
46
+ *
47
+ * About MEMORY_CONTEXT_CHECKING:
48
+ *
49
+ * Since we usually round request sizes up to the next power of 2, there
50
+ * is often some unused space immediately after a requested data area.
51
+ * Thus, if someone makes the common error of writing past what they've
52
+ * requested, the problem is likely to go unnoticed ... until the day when
53
+ * there *isn't* any wasted space, perhaps because of different memory
54
+ * alignment on a new platform, or some other effect. To catch this sort
55
+ * of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
56
+ * the requested space whenever the request is less than the actual chunk
57
+ * size, and verifies that the byte is undamaged when the chunk is freed.
58
+ *
59
+ */
60
+
61
#include "common/sedna.h"
#include "common/mmgr/memutils.h"

#ifndef SE_MEMORY_TRACK

/* Define this to detail debug alloc information */
/* #define HAVE_ALLOCINFO */

/*--------------------
 * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
 * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
 *
 * Note that all chunks in the freelists have power-of-2 sizes.  This
 * improves recyclability: we may waste some space, but the wasted space
 * should stay pretty constant as requests are made and released.
 *
 * A request too large for the last freelist is handled by allocating a
 * dedicated block from malloc().  The block still has a block header and
 * chunk header, but when the chunk is freed we'll return the whole block
 * to malloc(), not put it on our freelists.
 *
 * CAUTION: ALLOC_MINBITS must be large enough so that
 * 1<<ALLOC_MINBITS is at least MAXALIGN,
 * or we may fail to align the smallest chunks adequately.
 * 8-byte alignment is enough on all currently known machines.
 *
 * With the current parameters, request sizes up to 8K are treated as chunks,
 * larger requests go into dedicated blocks.  Change ALLOCSET_NUM_FREELISTS
 * to adjust the boundary point.
 *--------------------
 */

#define ALLOC_MINBITS 3         /* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS 11
#define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */

#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ MAXALIGN(sizeof(AllocChunkData))

typedef struct AllocBlockData *AllocBlock;  /* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *      Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: isReset means there is nothing for AllocSetReset to do.  This is
 * different from the aset being physically empty (empty blocks list) because
 * we may still have a keeper block.  It's also different from the set being
 * logically empty, because we don't attempt to detect se_free'ing the last
 * active chunk.
 */
typedef struct AllocSetContext
{
    MemoryContextData header;   /* Standard memory-context fields */
    /* Info about storage allocated in this context: */
    AllocBlock blocks;          /* head of list of blocks in this set */
    AllocChunk freelist[ALLOCSET_NUM_FREELISTS];    /* free chunk lists */
    bool isReset;               /* T = no space alloced since last reset */
    /* Allocation parameters for this context: */
    usize_t initBlockSize;      /* initial block size */
    usize_t maxBlockSize;       /* maximum block size */
    AllocBlock keeper;          /* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;

/*
 * AllocBlock
 *      An AllocBlock is the unit of memory that is obtained by aset.c
 *      from malloc().  It contains one or more AllocChunks, which are
 *      the units requested by se_alloc() and freed by se_free().  AllocChunks
 *      cannot be returned to malloc() individually, instead they are put
 *      on freelists by se_free() and re-used by the next se_alloc() that has
 *      a matching request size.
 *
 *      AllocBlockData is the header data for a block --- the usable space
 *      within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
    AllocSet aset;              /* aset that owns this block */
    AllocBlock next;            /* next block in aset's blocks list */
    char *freeptr;              /* start of free space in this block */
    char *endptr;               /* end of space in this block */
} AllocBlockData;

/*
 * AllocChunk
 *      The prefix of each piece of memory in an AllocBlock
 *
 * NB: this MUST match StandardChunkHeader as defined by utils/memutils.h.
 */
typedef struct AllocChunkData
{
    /* aset is the owning aset if allocated, or the freelist link if free */
    void *aset;
    /* size is always the size of the usable space in the chunk */
    usize_t size;
#ifdef MEMORY_CONTEXT_CHECKING
    /* when debugging memory usage, also store actual requested size */
    /* this is zero in a free chunk */
    usize_t requested_size;
#endif
} AllocChunkData;

/*
 * AllocPointerIsValid
 *      True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *      True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

/* Convert between a user data pointer and its chunk header (and back). */
#define AllocPointerGetChunk(ptr) \
    ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk) \
    ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, usize_t size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, usize_t size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static usize_t AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 */
static MemoryContextMethods AllocSetMethods = {
    AllocSetAlloc,
    AllocSetFree,
    AllocSetRealloc,
    AllocSetInit,
    AllocSetReset,
    AllocSetDelete,
    AllocSetGetChunkSpace,
    AllocSetIsEmpty,
    AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
    ,AllocSetCheck
#endif
};


/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
            fprintf(stderr, "AllocFree: %s: %p, %d\n", \
                (_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
            fprintf(stderr, "AllocAlloc: %s: %p, %d\n", \
                (_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
252
+
253
+ /* ----------
254
+ * AllocSetFreeIndex -
255
+ *
256
+ * Depending on the size of an allocation compute which freechunk
257
+ * list of the alloc set it belongs to. Caller must have verified
258
+ * that size <= ALLOC_CHUNK_LIMIT.
259
+ * ----------
260
+ */
261
+ static int
262
+ AllocSetFreeIndex(usize_t size)
263
+ {
264
+ int idx = 0;
265
+
266
+ if (size > 0)
267
+ {
268
+ size = (size - 1) >> ALLOC_MINBITS;
269
+ while (size != 0)
270
+ {
271
+ idx++;
272
+ size >>= 1;
273
+ }
274
+ U_ASSERT(idx < ALLOCSET_NUM_FREELISTS);
275
+ }
276
+
277
+ return idx;
278
+ }
279
+
280
+
281
+ /*
282
+ * Public routines
283
+ */
284
+
285
+
286
+ /*
287
+ * AllocSetContextCreate
288
+ * Create a new AllocSet context.
289
+ *
290
+ * parent: parent context, or NULL if top-level context
291
+ * name: name of context (for debugging --- string will be copied)
292
+ * minContextSize: minimum context size
293
+ * initBlockSize: initial allocation block size
294
+ * maxBlockSize: maximum allocation block size
295
+ */
296
+ MemoryContext
297
+ AllocSetContextCreate(MemoryContext parent,
298
+ const char *name,
299
+ usize_t minContextSize,
300
+ usize_t initBlockSize,
301
+ usize_t maxBlockSize)
302
+ {
303
+ AllocSet context;
304
+
305
+ /* Do the type-independent part of context creation */
306
+ context = (AllocSet) MemoryContextCreate(sizeof(AllocSetContext),
307
+ &AllocSetMethods,
308
+ parent,
309
+ name);
310
+
311
+ /*
312
+ * Make sure alloc parameters are reasonable, and save them.
313
+ *
314
+ * We somewhat arbitrarily enforce a minimum 1K block size.
315
+ */
316
+ initBlockSize = MAXALIGN(initBlockSize);
317
+ if (initBlockSize < 1024)
318
+ initBlockSize = 1024;
319
+ maxBlockSize = MAXALIGN(maxBlockSize);
320
+ if (maxBlockSize < initBlockSize)
321
+ maxBlockSize = initBlockSize;
322
+ context->initBlockSize = initBlockSize;
323
+ context->maxBlockSize = maxBlockSize;
324
+
325
+ /*
326
+ * Grab always-allocated space, if requested
327
+ */
328
+ if (minContextSize > ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ)
329
+ {
330
+ usize_t blksize = MAXALIGN(minContextSize);
331
+ AllocBlock block;
332
+
333
+ block = (AllocBlock) malloc(blksize);
334
+ if (block == NULL)
335
+ {
336
+ MemoryContextStats(TopMemoryContext);
337
+ /*!!!
338
+ ereport(ERROR,
339
+ (errcode(ERRCODE_OUT_OF_MEMORY),
340
+ errmsg("out of memory"),
341
+ errdetail("Failed while creating memory context \"%s\".",
342
+ name)));
343
+ */
344
+ }
345
+ block->aset = context;
346
+ block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
347
+ block->endptr = ((char *) block) + blksize;
348
+ block->next = context->blocks;
349
+ context->blocks = block;
350
+ /* Mark block as not to be released at reset time */
351
+ context->keeper = block;
352
+ }
353
+
354
+ context->isReset = true;
355
+
356
+ return (MemoryContext) context;
357
+ }
358
+
359
/*
 * AllocSetInit
 *      Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree.  We must do whatever is needed to make the
 * new context minimally valid for deletion.  We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
    /*
     * Since MemoryContextCreate already zeroed the context node, we don't
     * have to do anything here: it's already OK.
     */
}
379
+
380
/*
 * AllocSetReset
 *      Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we hang onto any "keeper" block specified for the set.  In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
    AllocSet set = (AllocSet) context;
    AllocBlock block;

    U_ASSERT(AllocSetIsValid(set));

    /* Nothing to do if no se_allocs since startup or last reset */
    if (set->isReset)
        return;

#ifdef MEMORY_CONTEXT_CHECKING
    /* Check for corruption and leaks before freeing */
    AllocSetCheck(context);
#endif

    /* Clear chunk freelists */
    MemSetAligned(set->freelist, 0, sizeof(set->freelist));

    /* Snapshot the block list before rewriting the head pointer below. */
    block = set->blocks;

    /* New blocks list is either empty or just the keeper block */
    set->blocks = set->keeper;

    while (block != NULL)
    {
        AllocBlock next = block->next;

        if (block == set->keeper)
        {
            /* Reset the block, but don't return it to malloc */
            char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
            /* Wipe freed memory for debugging purposes */
            memset(datastart, 0x7F, block->freeptr - datastart);
#endif
            block->freeptr = datastart;
            block->next = NULL;
        }
        else
        {
            /* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
            /* Wipe freed memory for debugging purposes */
            memset(block, 0x7F, block->freeptr - ((char *) block));
#endif
            free(block);
        }
        block = next;
    }

    set->isReset = true;
}
446
+
447
+ /*
448
+ * AllocSetDelete
449
+ * Frees all memory which is allocated in the given set,
450
+ * in preparation for deletion of the set.
451
+ *
452
+ * Unlike AllocSetReset, this *must* free all resources of the set.
453
+ * But note we are not responsible for deleting the context node itself.
454
+ */
455
+ static void
456
+ AllocSetDelete(MemoryContext context)
457
+ {
458
+ AllocSet set = (AllocSet) context;
459
+ AllocBlock block = set->blocks;
460
+
461
+ U_ASSERT(AllocSetIsValid(set));
462
+
463
+ #ifdef MEMORY_CONTEXT_CHECKING
464
+ /* Check for corruption and leaks before freeing */
465
+ AllocSetCheck(context);
466
+ #endif
467
+
468
+ /* Make it look empty, just in case... */
469
+ MemSetAligned(set->freelist, 0, sizeof(set->freelist));
470
+ set->blocks = NULL;
471
+ set->keeper = NULL;
472
+
473
+ while (block != NULL)
474
+ {
475
+ AllocBlock next = block->next;
476
+
477
+ #ifdef CLOBBER_FREED_MEMORY
478
+ /* Wipe freed memory for debugging purposes */
479
+ memset(block, 0x7F, block->freeptr - ((char *) block));
480
+ #endif
481
+ free(block);
482
+ block = next;
483
+ }
484
+ }
485
+
486
+ /*
487
+ * AllocSetAlloc
488
+ * Returns pointer to allocated memory of given size; memory is added
489
+ * to the set.
490
+ */
491
+ static void *
492
+ AllocSetAlloc(MemoryContext context, usize_t size)
493
+ {
494
+ AllocSet set = (AllocSet) context;
495
+ AllocBlock block;
496
+ AllocChunk chunk;
497
+ AllocChunk priorfree;
498
+ int fidx;
499
+ usize_t chunk_size;
500
+ usize_t blksize;
501
+
502
+ U_ASSERT(AllocSetIsValid(set));
503
+
504
+ /*
505
+ * If requested size exceeds maximum for chunks, allocate an entire block
506
+ * for this request.
507
+ */
508
+ if (size > ALLOC_CHUNK_LIMIT)
509
+ {
510
+ chunk_size = MAXALIGN(size);
511
+ blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
512
+ block = (AllocBlock) malloc(blksize);
513
+ if (block == NULL)
514
+ {
515
+ MemoryContextStats(TopMemoryContext);
516
+ /*
517
+ ereport(ERROR,
518
+ (errcode(ERRCODE_OUT_OF_MEMORY),
519
+ errmsg("out of memory"),
520
+ errdetail("Failed on request of size %lu.",
521
+ (unsigned long) size)));
522
+ */
523
+ }
524
+ block->aset = set;
525
+ block->freeptr = block->endptr = ((char *) block) + blksize;
526
+
527
+ chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
528
+ chunk->aset = set;
529
+ chunk->size = chunk_size;
530
+ #ifdef MEMORY_CONTEXT_CHECKING
531
+ chunk->requested_size = size;
532
+ /* set mark to catch clobber of "unused" space */
533
+ if (size < chunk_size)
534
+ ((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
535
+ #endif
536
+
537
+ /*
538
+ * Stick the new block underneath the active allocation block, so that
539
+ * we don't lose the use of the space remaining therein.
540
+ */
541
+ if (set->blocks != NULL)
542
+ {
543
+ block->next = set->blocks->next;
544
+ set->blocks->next = block;
545
+ }
546
+ else
547
+ {
548
+ block->next = NULL;
549
+ set->blocks = block;
550
+ }
551
+
552
+ set->isReset = false;
553
+
554
+ AllocAllocInfo(set, chunk);
555
+ return AllocChunkGetPointer(chunk);
556
+ }
557
+
558
+ /*
559
+ * Request is small enough to be treated as a chunk. Look in the
560
+ * corresponding free list to see if there is a free chunk we could reuse.
561
+ */
562
+ fidx = AllocSetFreeIndex(size);
563
+ priorfree = NULL;
564
+ for (chunk = set->freelist[fidx]; chunk; chunk = (AllocChunk) chunk->aset)
565
+ {
566
+ if (chunk->size >= size)
567
+ break;
568
+ priorfree = chunk;
569
+ }
570
+
571
+ /*
572
+ * If one is found, remove it from the free list, make it again a member
573
+ * of the alloc set and return its data address.
574
+ */
575
+ if (chunk != NULL)
576
+ {
577
+ if (priorfree == NULL)
578
+ set->freelist[fidx] = (AllocChunk) chunk->aset;
579
+ else
580
+ priorfree->aset = chunk->aset;
581
+
582
+ chunk->aset = (void *) set;
583
+
584
+ #ifdef MEMORY_CONTEXT_CHECKING
585
+ chunk->requested_size = size;
586
+ /* set mark to catch clobber of "unused" space */
587
+ if (size < chunk->size)
588
+ ((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
589
+ #endif
590
+
591
+ /* isReset must be false already */
592
+ U_ASSERT(!set->isReset);
593
+
594
+ AllocAllocInfo(set, chunk);
595
+ return AllocChunkGetPointer(chunk);
596
+ }
597
+
598
+ /*
599
+ * Choose the actual chunk size to allocate.
600
+ */
601
+ chunk_size = 1 << (fidx + ALLOC_MINBITS);
602
+ U_ASSERT(chunk_size >= size);
603
+
604
+ /*
605
+ * If there is enough room in the active allocation block, we will put the
606
+ * chunk into that block. Else must start a new one.
607
+ */
608
+ if ((block = set->blocks) != NULL)
609
+ {
610
+ usize_t availspace = block->endptr - block->freeptr;
611
+
612
+ if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
613
+ {
614
+ /*
615
+ * The existing active (top) block does not have enough room for
616
+ * the requested allocation, but it might still have a useful
617
+ * amount of space in it. Once we push it down in the block list,
618
+ * we'll never try to allocate more space from it. So, before we
619
+ * do that, carve up its free space into chunks that we can put on
620
+ * the set's freelists.
621
+ *
622
+ * Because we can only get here when there's less than
623
+ * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
624
+ * more than ALLOCSET_NUM_FREELISTS-1 times.
625
+ */
626
+ while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
627
+ {
628
+ usize_t availchunk = availspace - ALLOC_CHUNKHDRSZ;
629
+ int a_fidx = AllocSetFreeIndex(availchunk);
630
+
631
+ /*
632
+ * In most cases, we'll get back the index of the next larger
633
+ * freelist than the one we need to put this chunk on. The
634
+ * exception is when availchunk is exactly a power of 2.
635
+ */
636
+ if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
637
+ {
638
+ a_fidx--;
639
+ U_ASSERT(a_fidx >= 0);
640
+ availchunk = (1 << (a_fidx + ALLOC_MINBITS));
641
+ }
642
+
643
+ chunk = (AllocChunk) (block->freeptr);
644
+
645
+ block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
646
+ availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
647
+
648
+ chunk->size = availchunk;
649
+ #ifdef MEMORY_CONTEXT_CHECKING
650
+ chunk->requested_size = 0; /* mark it free */
651
+ #endif
652
+ chunk->aset = (void *) set->freelist[a_fidx];
653
+ set->freelist[a_fidx] = chunk;
654
+ }
655
+
656
+ /* Mark that we need to create a new block */
657
+ block = NULL;
658
+ }
659
+ }
660
+
661
+ /*
662
+ * Time to create a new regular (multi-chunk) block?
663
+ */
664
+ if (block == NULL)
665
+ {
666
+ usize_t required_size;
667
+
668
+ if (set->blocks == NULL)
669
+ {
670
+ /* First block of the alloc set, use initBlockSize */
671
+ blksize = set->initBlockSize;
672
+ }
673
+ else
674
+ {
675
+ /*
676
+ * Use first power of 2 that is larger than previous block, but
677
+ * not more than the allowed limit. (We don't simply double the
678
+ * prior block size, because in some cases this could be a funny
679
+ * size, eg if very first allocation was for an odd-sized large
680
+ * chunk.)
681
+ */
682
+ usize_t pblksize = set->blocks->endptr - ((char *) set->blocks);
683
+
684
+ blksize = set->initBlockSize;
685
+ while (blksize <= pblksize)
686
+ blksize <<= 1;
687
+ if (blksize > set->maxBlockSize)
688
+ blksize = set->maxBlockSize;
689
+ }
690
+
691
+ /*
692
+ * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
693
+ * space... but try to keep it a power of 2.
694
+ */
695
+ required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
696
+ while (blksize < required_size)
697
+ blksize <<= 1;
698
+
699
+ /* Try to allocate it */
700
+ block = (AllocBlock) malloc(blksize);
701
+
702
+ /*
703
+ * We could be asking for pretty big blocks here, so cope if malloc
704
+ * fails. But give up if there's less than a meg or so available...
705
+ */
706
+ while (block == NULL && blksize > 1024 * 1024)
707
+ {
708
+ blksize >>= 1;
709
+ if (blksize < required_size)
710
+ break;
711
+ block = (AllocBlock) malloc(blksize);
712
+ }
713
+
714
+ if (block == NULL)
715
+ {
716
+ MemoryContextStats(TopMemoryContext);
717
+ /*
718
+ ereport(ERROR,
719
+ (errcode(ERRCODE_OUT_OF_MEMORY),
720
+ errmsg("out of memory"),
721
+ errdetail("Failed on request of size %lu.",
722
+ (unsigned long) size)));
723
+ */
724
+ }
725
+
726
+ block->aset = set;
727
+ block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
728
+ block->endptr = ((char *) block) + blksize;
729
+
730
+ /*
731
+ * If this is the first block of the set, make it the "keeper" block.
732
+ * Formerly, a keeper block could only be created during context
733
+ * creation, but allowing it to happen here lets us have fast reset
734
+ * cycling even for contexts created with minContextSize = 0; that way
735
+ * we don't have to force space to be allocated in contexts that might
736
+ * never need any space. Don't mark an oversize block as a keeper,
737
+ * however.
738
+ */
739
+ if (set->blocks == NULL && blksize == set->initBlockSize)
740
+ {
741
+ U_ASSERT(set->keeper == NULL);
742
+ set->keeper = block;
743
+ }
744
+
745
+ block->next = set->blocks;
746
+ set->blocks = block;
747
+ }
748
+
749
+ /*
750
+ * OK, do the allocation
751
+ */
752
+ chunk = (AllocChunk) (block->freeptr);
753
+
754
+ block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
755
+ U_ASSERT(block->freeptr <= block->endptr);
756
+
757
+ chunk->aset = (void *) set;
758
+ chunk->size = chunk_size;
759
+ #ifdef MEMORY_CONTEXT_CHECKING
760
+ chunk->requested_size = size;
761
+ /* set mark to catch clobber of "unused" space */
762
+ if (size < chunk->size)
763
+ ((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
764
+ #endif
765
+
766
+ set->isReset = false;
767
+
768
+ AllocAllocInfo(set, chunk);
769
+ return AllocChunkGetPointer(chunk);
770
+ }
771
+
772
+ /*
773
+ * AllocSetFree
774
+ * Frees allocated memory; memory is removed from the set.
775
+ */
776
+ static void
777
+ AllocSetFree(MemoryContext context, void *pointer)
778
+ {
779
+ AllocSet set = (AllocSet) context;
780
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
781
+
782
+ AllocFreeInfo(set, chunk);
783
+
784
+ #ifdef MEMORY_CONTEXT_CHECKING
785
+ /* Test for someone scribbling on unused space in chunk */
786
+ if (chunk->requested_size < chunk->size)
787
+ if (((char *) pointer)[chunk->requested_size] != 0x7E)
788
+ /*
789
+ elog(WARNING, "detected write past chunk end in %s %p",
790
+ set->header.name, chunk)*/;
791
+ #endif
792
+
793
+ if (chunk->size > ALLOC_CHUNK_LIMIT)
794
+ {
795
+ /*
796
+ * Big chunks are certain to have been allocated as single-chunk
797
+ * blocks. Find the containing block and return it to malloc().
798
+ */
799
+ AllocBlock block = set->blocks;
800
+ AllocBlock prevblock = NULL;
801
+
802
+ while (block != NULL)
803
+ {
804
+ if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
805
+ break;
806
+ prevblock = block;
807
+ block = block->next;
808
+ }
809
+ if (block == NULL)
810
+ /*
811
+ elog(ERROR, "could not find block containing chunk %p", chunk);
812
+ //let's just make sure chunk is the only one in the block
813
+ U_ASSERT(block->freeptr == ((char *) block) +
814
+ (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
815
+ */;
816
+
817
+ /* OK, remove block from aset's list and free it */
818
+ if (prevblock == NULL)
819
+ set->blocks = block->next;
820
+ else
821
+ prevblock->next = block->next;
822
+ #ifdef CLOBBER_FREED_MEMORY
823
+ /* Wipe freed memory for debugging purposes */
824
+ memset(block, 0x7F, block->freeptr - ((char *) block));
825
+ #endif
826
+ free(block);
827
+ }
828
+ else
829
+ {
830
+ /* Normal case, put the chunk into appropriate freelist */
831
+ int fidx = AllocSetFreeIndex(chunk->size);
832
+
833
+ chunk->aset = (void *) set->freelist[fidx];
834
+
835
+ #ifdef CLOBBER_FREED_MEMORY
836
+ /* Wipe freed memory for debugging purposes */
837
+ memset(pointer, 0x7F, chunk->size);
838
+ #endif
839
+
840
+ #ifdef MEMORY_CONTEXT_CHECKING
841
+ /* Reset requested_size to 0 in chunks that are on freelist */
842
+ chunk->requested_size = 0;
843
+ #endif
844
+ set->freelist[fidx] = chunk;
845
+ }
846
+ }
847
+
848
+ /*
849
+ * AllocSetRealloc
850
+ * Returns new pointer to allocated memory of given size; this memory
851
+ * is added to the set. Memory associated with given pointer is copied
852
+ * into the new memory, and the old memory is freed.
853
+ */
854
+ static void *
855
+ AllocSetRealloc(MemoryContext context, void *pointer, usize_t size)
856
+ {
857
+ AllocSet set = (AllocSet) context;
858
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
859
+ usize_t oldsize = chunk->size;
860
+
861
+ #ifdef MEMORY_CONTEXT_CHECKING
862
+ /* Test for someone scribbling on unused space in chunk */
863
+ if (chunk->requested_size < oldsize)
864
+ if (((char *) pointer)[chunk->requested_size] != 0x7E)
865
+ /*
866
+ elog(WARNING, "detected write past chunk end in %s %p",
867
+ set->header.name, chunk)*/;
868
+ #endif
869
+
870
+ /* isReset must be false already */
871
+ U_ASSERT(!set->isReset);
872
+
873
+ /*
874
+ * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
875
+ * allocated area already is >= the new size. (In particular, we always
876
+ * fall out here if the requested size is a decrease.)
877
+ */
878
+ if (oldsize >= size)
879
+ {
880
+ #ifdef MEMORY_CONTEXT_CHECKING
881
+ chunk->requested_size = size;
882
+ /* set mark to catch clobber of "unused" space */
883
+ if (size < oldsize)
884
+ ((char *) pointer)[size] = 0x7E;
885
+ #endif
886
+ return pointer;
887
+ }
888
+
889
+ if (oldsize > ALLOC_CHUNK_LIMIT)
890
+ {
891
+ /*
892
+ * The chunk must been allocated as a single-chunk block. Find the
893
+ * containing block and use realloc() to make it bigger with minimum
894
+ * space wastage.
895
+ */
896
+ AllocBlock block = set->blocks;
897
+ AllocBlock prevblock = NULL;
898
+ usize_t chksize;
899
+ usize_t blksize;
900
+
901
+ while (block != NULL)
902
+ {
903
+ if (chunk == (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ))
904
+ break;
905
+ prevblock = block;
906
+ block = block->next;
907
+ }
908
+ if (block == NULL)
909
+ /*
910
+ elog(ERROR, "could not find block containing chunk %p", chunk);
911
+ // let's just make sure chunk is the only one in the block
912
+ U_ASSERT(block->freeptr == ((char *) block) +
913
+ (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
914
+ */;
915
+
916
+ /* Do the realloc */
917
+ chksize = MAXALIGN(size);
918
+ blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
919
+ block = (AllocBlock) realloc(block, blksize);
920
+ if (block == NULL)
921
+ {
922
+ MemoryContextStats(TopMemoryContext);
923
+ /*
924
+ ereport(ERROR,
925
+ (errcode(ERRCODE_OUT_OF_MEMORY),
926
+ errmsg("out of memory"),
927
+ errdetail("Failed on request of size %lu.",
928
+ (unsigned long) size)));
929
+ */
930
+ }
931
+ block->freeptr = block->endptr = ((char *) block) + blksize;
932
+
933
+ /* Update pointers since block has likely been moved */
934
+ chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
935
+ if (prevblock == NULL)
936
+ set->blocks = block;
937
+ else
938
+ prevblock->next = block;
939
+ chunk->size = chksize;
940
+
941
+ #ifdef MEMORY_CONTEXT_CHECKING
942
+ chunk->requested_size = size;
943
+ /* set mark to catch clobber of "unused" space */
944
+ if (size < chunk->size)
945
+ ((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
946
+ #endif
947
+
948
+ return AllocChunkGetPointer(chunk);
949
+ }
950
+ else
951
+ {
952
+ /*
953
+ * Small-chunk case. If the chunk is the last one in its block, there
954
+ * might be enough free space after it that we can just enlarge the
955
+ * chunk in-place. It's relatively painful to find the containing
956
+ * block in the general case, but we can detect last-ness quite
957
+ * cheaply for the typical case where the chunk is in the active
958
+ * (topmost) allocation block. (At least with the regression tests
959
+ * and code as of 1/2001, realloc'ing the last chunk of a non-topmost
960
+ * block hardly ever happens, so it's not worth scanning the block
961
+ * list to catch that case.)
962
+ *
963
+ * NOTE: must be careful not to create a chunk of a size that
964
+ * AllocSetAlloc would not create, else we'll get confused later.
965
+ */
966
+ AllocPointer newPointer;
967
+
968
+ if (size <= ALLOC_CHUNK_LIMIT)
969
+ {
970
+ AllocBlock block = set->blocks;
971
+ char *chunk_end;
972
+
973
+ chunk_end = (char *) chunk + (oldsize + ALLOC_CHUNKHDRSZ);
974
+ if (chunk_end == block->freeptr)
975
+ {
976
+ /* OK, it's last in block ... is there room? */
977
+ usize_t freespace = block->endptr - block->freeptr;
978
+ int fidx;
979
+ usize_t newsize;
980
+ usize_t delta;
981
+
982
+ fidx = AllocSetFreeIndex(size);
983
+ newsize = 1 << (fidx + ALLOC_MINBITS);
984
+ U_ASSERT(newsize >= oldsize);
985
+ delta = newsize - oldsize;
986
+ if (freespace >= delta)
987
+ {
988
+ /* Yes, so just enlarge the chunk. */
989
+ block->freeptr += delta;
990
+ chunk->size += delta;
991
+ #ifdef MEMORY_CONTEXT_CHECKING
992
+ chunk->requested_size = size;
993
+ /* set mark to catch clobber of "unused" space */
994
+ if (size < chunk->size)
995
+ ((char *) pointer)[size] = 0x7E;
996
+ #endif
997
+ return pointer;
998
+ }
999
+ }
1000
+ }
1001
+
1002
+ /* Normal small-chunk case: just do it by brute force. */
1003
+
1004
+ /* allocate new chunk */
1005
+ newPointer = AllocSetAlloc((MemoryContext) set, size);
1006
+
1007
+ /* transfer existing data (certain to fit) */
1008
+ memcpy(newPointer, pointer, oldsize);
1009
+
1010
+ /* free old chunk */
1011
+ AllocSetFree((MemoryContext) set, pointer);
1012
+
1013
+ return newPointer;
1014
+ }
1015
+ }
1016
+
1017
+ /*
1018
+ * AllocSetGetChunkSpace
1019
+ * Given a currently-allocated chunk, determine the total space
1020
+ * it occupies (including all memory-allocation overhead).
1021
+ */
1022
+ static usize_t
1023
+ AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1024
+ {
1025
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
1026
+
1027
+ return chunk->size + ALLOC_CHUNKHDRSZ;
1028
+ }
1029
+
1030
+ /*
1031
+ * AllocSetIsEmpty
1032
+ * Is an allocset empty of any allocated space?
1033
+ */
1034
+ static bool
1035
+ AllocSetIsEmpty(MemoryContext context)
1036
+ {
1037
+ AllocSet set = (AllocSet) context;
1038
+
1039
+ /*
1040
+ * For now, we say "empty" only if the context is new or just reset. We
1041
+ * could examine the freelists to determine if all space has been freed,
1042
+ * but it's not really worth the trouble for present uses of this
1043
+ * functionality.
1044
+ */
1045
+ if (set->isReset)
1046
+ return true;
1047
+ return false;
1048
+ }
1049
+
1050
+ /*
1051
+ * AllocSetStats
1052
+ * Displays stats about memory consumption of an allocset.
1053
+ */
1054
+ static void
1055
+ AllocSetStats(MemoryContext context)
1056
+ {
1057
+ AllocSet set = (AllocSet) context;
1058
+ long nblocks = 0;
1059
+ long nchunks = 0;
1060
+ long totalspace = 0;
1061
+ long freespace = 0;
1062
+ AllocBlock block;
1063
+ AllocChunk chunk;
1064
+ int fidx;
1065
+
1066
+ for (block = set->blocks; block != NULL; block = block->next)
1067
+ {
1068
+ nblocks++;
1069
+ totalspace += block->endptr - ((char *) block);
1070
+ freespace += block->endptr - block->freeptr;
1071
+ }
1072
+ for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1073
+ {
1074
+ for (chunk = set->freelist[fidx]; chunk != NULL;
1075
+ chunk = (AllocChunk) chunk->aset)
1076
+ {
1077
+ nchunks++;
1078
+ freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1079
+ }
1080
+ }
1081
+ fprintf(stderr,
1082
+ "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
1083
+ set->header.name, totalspace, nblocks, freespace, nchunks,
1084
+ totalspace - freespace);
1085
+ }
1086
+
1087
+
1088
+ #ifdef MEMORY_CONTEXT_CHECKING
1089
+
1090
+ /*
1091
+ * AllocSetCheck
1092
+ * Walk through chunks and check consistency of memory.
1093
+ *
1094
+ * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1095
+ * find yourself in an infinite loop when trouble occurs, because this
1096
+ * routine will be entered again when elog cleanup tries to release memory!
1097
+ */
1098
+ static void
1099
+ AllocSetCheck(MemoryContext context)
1100
+ {
1101
+ AllocSet set = (AllocSet) context;
1102
+ char *name = set->header.name;
1103
+ AllocBlock block;
1104
+
1105
+ for (block = set->blocks; block != NULL; block = block->next)
1106
+ {
1107
+ char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1108
+ long blk_used = block->freeptr - bpoz;
1109
+ long blk_data = 0;
1110
+ long nchunks = 0;
1111
+
1112
+ /*
1113
+ * Empty block - empty can be keeper-block only
1114
+ */
1115
+ if (!blk_used)
1116
+ {
1117
+ if (set->keeper != block)
1118
+ /*
1119
+ elog(WARNING, "problem in alloc set %s: empty block %p",
1120
+ name, block)*/;
1121
+ }
1122
+
1123
+ /*
1124
+ * Chunk walker
1125
+ */
1126
+ while (bpoz < block->freeptr)
1127
+ {
1128
+ AllocChunk chunk = (AllocChunk) bpoz;
1129
+ usize_t chsize,
1130
+ dsize;
1131
+ char *chdata_end;
1132
+
1133
+ chsize = chunk->size; /* aligned chunk size */
1134
+ dsize = chunk->requested_size; /* real data */
1135
+ chdata_end = ((char *) chunk) + (ALLOC_CHUNKHDRSZ + dsize);
1136
+
1137
+ /*
1138
+ * Check chunk size
1139
+ */
1140
+ if (dsize > chsize)
1141
+ /*
1142
+ elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1143
+ name, chunk, block)*/;
1144
+ if (chsize < (1 << ALLOC_MINBITS))
1145
+ /*
1146
+ elog(WARNING, "problem in alloc set %s: bad size %lu for chunk %p in block %p",
1147
+ name, (unsigned long) chsize, chunk, block)*/;
1148
+
1149
+ /* single-chunk block? */
1150
+ if (chsize > ALLOC_CHUNK_LIMIT &&
1151
+ chsize + ALLOC_CHUNKHDRSZ != blk_used)
1152
+ /*
1153
+ elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1154
+ name, chunk, block)*/;
1155
+
1156
+ /*
1157
+ * If chunk is allocated, check for correct aset pointer. (If it's
1158
+ * free, the aset is the freelist pointer, which we can't check as
1159
+ * easily...)
1160
+ */
1161
+ if (dsize > 0 && chunk->aset != (void *) set)
1162
+ /*elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1163
+ name, block, chunk)*/;
1164
+
1165
+ /*
1166
+ * Check for overwrite of "unallocated" space in chunk
1167
+ */
1168
+ if (dsize > 0 && dsize < chsize && *chdata_end != 0x7E)
1169
+ /*elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1170
+ name, block, chunk)*/;
1171
+
1172
+ blk_data += chsize;
1173
+ nchunks++;
1174
+
1175
+ bpoz += ALLOC_CHUNKHDRSZ + chsize;
1176
+ }
1177
+
1178
+ if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1179
+ /*elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1180
+ name, block)*/;
1181
+ }
1182
+ }
1183
+ #endif /* MEMORY_CONTEXT_CHECKING */
1184
+
1185
+ #endif /* SE_MEMORY_TRACK */