pg_query 1.1.0 → 2.0.1

Files changed (478)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +163 -52
  3. data/README.md +80 -69
  4. data/Rakefile +82 -1
  5. data/ext/pg_query/extconf.rb +3 -31
  6. data/ext/pg_query/guc-file.c +0 -0
  7. data/ext/pg_query/include/access/amapi.h +246 -0
  8. data/ext/pg_query/include/access/attmap.h +52 -0
  9. data/ext/pg_query/include/access/attnum.h +64 -0
  10. data/ext/pg_query/include/access/clog.h +61 -0
  11. data/ext/pg_query/include/access/commit_ts.h +77 -0
  12. data/ext/pg_query/include/access/detoast.h +92 -0
  13. data/ext/pg_query/include/access/genam.h +228 -0
  14. data/ext/pg_query/include/access/gin.h +78 -0
  15. data/ext/pg_query/include/access/htup.h +89 -0
  16. data/ext/pg_query/include/access/htup_details.h +819 -0
  17. data/ext/pg_query/include/access/itup.h +161 -0
  18. data/ext/pg_query/include/access/parallel.h +82 -0
  19. data/ext/pg_query/include/access/printtup.h +35 -0
  20. data/ext/pg_query/include/access/relation.h +28 -0
  21. data/ext/pg_query/include/access/relscan.h +176 -0
  22. data/ext/pg_query/include/access/rmgr.h +35 -0
  23. data/ext/pg_query/include/access/rmgrlist.h +49 -0
  24. data/ext/pg_query/include/access/sdir.h +58 -0
  25. data/ext/pg_query/include/access/skey.h +151 -0
  26. data/ext/pg_query/include/access/stratnum.h +83 -0
  27. data/ext/pg_query/include/access/sysattr.h +29 -0
  28. data/ext/pg_query/include/access/table.h +27 -0
  29. data/ext/pg_query/include/access/tableam.h +1825 -0
  30. data/ext/pg_query/include/access/transam.h +265 -0
  31. data/ext/pg_query/include/access/tupconvert.h +51 -0
  32. data/ext/pg_query/include/access/tupdesc.h +154 -0
  33. data/ext/pg_query/include/access/tupmacs.h +247 -0
  34. data/ext/pg_query/include/access/twophase.h +61 -0
  35. data/ext/pg_query/include/access/xact.h +463 -0
  36. data/ext/pg_query/include/access/xlog.h +398 -0
  37. data/ext/pg_query/include/access/xlog_internal.h +330 -0
  38. data/ext/pg_query/include/access/xlogdefs.h +109 -0
  39. data/ext/pg_query/include/access/xloginsert.h +64 -0
  40. data/ext/pg_query/include/access/xlogreader.h +327 -0
  41. data/ext/pg_query/include/access/xlogrecord.h +227 -0
  42. data/ext/pg_query/include/bootstrap/bootstrap.h +62 -0
  43. data/ext/pg_query/include/c.h +1322 -0
  44. data/ext/pg_query/include/catalog/catalog.h +42 -0
  45. data/ext/pg_query/include/catalog/catversion.h +58 -0
  46. data/ext/pg_query/include/catalog/dependency.h +275 -0
  47. data/ext/pg_query/include/catalog/genbki.h +64 -0
  48. data/ext/pg_query/include/catalog/index.h +199 -0
  49. data/ext/pg_query/include/catalog/indexing.h +366 -0
  50. data/ext/pg_query/include/catalog/namespace.h +188 -0
  51. data/ext/pg_query/include/catalog/objectaccess.h +197 -0
  52. data/ext/pg_query/include/catalog/objectaddress.h +84 -0
  53. data/ext/pg_query/include/catalog/pg_aggregate.h +176 -0
  54. data/ext/pg_query/include/catalog/pg_aggregate_d.h +77 -0
  55. data/ext/pg_query/include/catalog/pg_am.h +60 -0
  56. data/ext/pg_query/include/catalog/pg_am_d.h +45 -0
  57. data/ext/pg_query/include/catalog/pg_attribute.h +204 -0
  58. data/ext/pg_query/include/catalog/pg_attribute_d.h +59 -0
  59. data/ext/pg_query/include/catalog/pg_authid.h +58 -0
  60. data/ext/pg_query/include/catalog/pg_authid_d.h +49 -0
  61. data/ext/pg_query/include/catalog/pg_class.h +200 -0
  62. data/ext/pg_query/include/catalog/pg_class_d.h +103 -0
  63. data/ext/pg_query/include/catalog/pg_collation.h +73 -0
  64. data/ext/pg_query/include/catalog/pg_collation_d.h +45 -0
  65. data/ext/pg_query/include/catalog/pg_constraint.h +247 -0
  66. data/ext/pg_query/include/catalog/pg_constraint_d.h +67 -0
  67. data/ext/pg_query/include/catalog/pg_control.h +250 -0
  68. data/ext/pg_query/include/catalog/pg_conversion.h +72 -0
  69. data/ext/pg_query/include/catalog/pg_conversion_d.h +35 -0
  70. data/ext/pg_query/include/catalog/pg_depend.h +73 -0
  71. data/ext/pg_query/include/catalog/pg_depend_d.h +34 -0
  72. data/ext/pg_query/include/catalog/pg_event_trigger.h +51 -0
  73. data/ext/pg_query/include/catalog/pg_event_trigger_d.h +34 -0
  74. data/ext/pg_query/include/catalog/pg_index.h +80 -0
  75. data/ext/pg_query/include/catalog/pg_index_d.h +56 -0
  76. data/ext/pg_query/include/catalog/pg_language.h +67 -0
  77. data/ext/pg_query/include/catalog/pg_language_d.h +39 -0
  78. data/ext/pg_query/include/catalog/pg_namespace.h +59 -0
  79. data/ext/pg_query/include/catalog/pg_namespace_d.h +34 -0
  80. data/ext/pg_query/include/catalog/pg_opclass.h +85 -0
  81. data/ext/pg_query/include/catalog/pg_opclass_d.h +49 -0
  82. data/ext/pg_query/include/catalog/pg_operator.h +102 -0
  83. data/ext/pg_query/include/catalog/pg_operator_d.h +106 -0
  84. data/ext/pg_query/include/catalog/pg_opfamily.h +60 -0
  85. data/ext/pg_query/include/catalog/pg_opfamily_d.h +47 -0
  86. data/ext/pg_query/include/catalog/pg_partitioned_table.h +63 -0
  87. data/ext/pg_query/include/catalog/pg_partitioned_table_d.h +35 -0
  88. data/ext/pg_query/include/catalog/pg_proc.h +211 -0
  89. data/ext/pg_query/include/catalog/pg_proc_d.h +99 -0
  90. data/ext/pg_query/include/catalog/pg_publication.h +115 -0
  91. data/ext/pg_query/include/catalog/pg_publication_d.h +36 -0
  92. data/ext/pg_query/include/catalog/pg_replication_origin.h +57 -0
  93. data/ext/pg_query/include/catalog/pg_replication_origin_d.h +29 -0
  94. data/ext/pg_query/include/catalog/pg_statistic.h +275 -0
  95. data/ext/pg_query/include/catalog/pg_statistic_d.h +194 -0
  96. data/ext/pg_query/include/catalog/pg_statistic_ext.h +74 -0
  97. data/ext/pg_query/include/catalog/pg_statistic_ext_d.h +40 -0
  98. data/ext/pg_query/include/catalog/pg_transform.h +45 -0
  99. data/ext/pg_query/include/catalog/pg_transform_d.h +32 -0
  100. data/ext/pg_query/include/catalog/pg_trigger.h +137 -0
  101. data/ext/pg_query/include/catalog/pg_trigger_d.h +106 -0
  102. data/ext/pg_query/include/catalog/pg_ts_config.h +50 -0
  103. data/ext/pg_query/include/catalog/pg_ts_config_d.h +32 -0
  104. data/ext/pg_query/include/catalog/pg_ts_dict.h +54 -0
  105. data/ext/pg_query/include/catalog/pg_ts_dict_d.h +33 -0
  106. data/ext/pg_query/include/catalog/pg_ts_parser.h +57 -0
  107. data/ext/pg_query/include/catalog/pg_ts_parser_d.h +35 -0
  108. data/ext/pg_query/include/catalog/pg_ts_template.h +48 -0
  109. data/ext/pg_query/include/catalog/pg_ts_template_d.h +32 -0
  110. data/ext/pg_query/include/catalog/pg_type.h +372 -0
  111. data/ext/pg_query/include/catalog/pg_type_d.h +285 -0
  112. data/ext/pg_query/include/catalog/storage.h +48 -0
  113. data/ext/pg_query/include/commands/async.h +54 -0
  114. data/ext/pg_query/include/commands/dbcommands.h +35 -0
  115. data/ext/pg_query/include/commands/defrem.h +173 -0
  116. data/ext/pg_query/include/commands/event_trigger.h +88 -0
  117. data/ext/pg_query/include/commands/explain.h +127 -0
  118. data/ext/pg_query/include/commands/prepare.h +61 -0
  119. data/ext/pg_query/include/commands/tablespace.h +67 -0
  120. data/ext/pg_query/include/commands/trigger.h +277 -0
  121. data/ext/pg_query/include/commands/user.h +37 -0
  122. data/ext/pg_query/include/commands/vacuum.h +293 -0
  123. data/ext/pg_query/include/commands/variable.h +38 -0
  124. data/ext/pg_query/include/common/file_perm.h +56 -0
  125. data/ext/pg_query/include/common/hashfn.h +104 -0
  126. data/ext/pg_query/include/common/ip.h +37 -0
  127. data/ext/pg_query/include/common/keywords.h +33 -0
  128. data/ext/pg_query/include/common/kwlookup.h +44 -0
  129. data/ext/pg_query/include/common/relpath.h +90 -0
  130. data/ext/pg_query/include/common/string.h +19 -0
  131. data/ext/pg_query/include/common/unicode_combining_table.h +196 -0
  132. data/ext/pg_query/include/datatype/timestamp.h +197 -0
  133. data/ext/pg_query/include/executor/execdesc.h +70 -0
  134. data/ext/pg_query/include/executor/executor.h +614 -0
  135. data/ext/pg_query/include/executor/functions.h +41 -0
  136. data/ext/pg_query/include/executor/instrument.h +101 -0
  137. data/ext/pg_query/include/executor/spi.h +175 -0
  138. data/ext/pg_query/include/executor/tablefunc.h +67 -0
  139. data/ext/pg_query/include/executor/tuptable.h +487 -0
  140. data/ext/pg_query/include/fmgr.h +775 -0
  141. data/ext/pg_query/include/funcapi.h +348 -0
  142. data/ext/pg_query/include/getaddrinfo.h +162 -0
  143. data/ext/pg_query/include/jit/jit.h +105 -0
  144. data/ext/pg_query/include/kwlist_d.h +1072 -0
  145. data/ext/pg_query/include/lib/ilist.h +727 -0
  146. data/ext/pg_query/include/lib/pairingheap.h +102 -0
  147. data/ext/pg_query/include/lib/simplehash.h +1059 -0
  148. data/ext/pg_query/include/lib/stringinfo.h +161 -0
  149. data/ext/pg_query/include/libpq/auth.h +29 -0
  150. data/ext/pg_query/include/libpq/crypt.h +46 -0
  151. data/ext/pg_query/include/libpq/hba.h +140 -0
  152. data/ext/pg_query/include/libpq/libpq-be.h +326 -0
  153. data/ext/pg_query/include/libpq/libpq.h +133 -0
  154. data/ext/pg_query/include/libpq/pqcomm.h +208 -0
  155. data/ext/pg_query/include/libpq/pqformat.h +210 -0
  156. data/ext/pg_query/include/libpq/pqsignal.h +42 -0
  157. data/ext/pg_query/include/mb/pg_wchar.h +672 -0
  158. data/ext/pg_query/include/mb/stringinfo_mb.h +24 -0
  159. data/ext/pg_query/include/miscadmin.h +476 -0
  160. data/ext/pg_query/include/nodes/bitmapset.h +122 -0
  161. data/ext/pg_query/include/nodes/execnodes.h +2520 -0
  162. data/ext/pg_query/include/nodes/extensible.h +160 -0
  163. data/ext/pg_query/include/nodes/lockoptions.h +61 -0
  164. data/ext/pg_query/include/nodes/makefuncs.h +108 -0
  165. data/ext/pg_query/include/nodes/memnodes.h +108 -0
  166. data/ext/pg_query/include/nodes/nodeFuncs.h +162 -0
  167. data/ext/pg_query/include/nodes/nodes.h +842 -0
  168. data/ext/pg_query/include/nodes/params.h +170 -0
  169. data/ext/pg_query/include/nodes/parsenodes.h +3579 -0
  170. data/ext/pg_query/include/nodes/pathnodes.h +2556 -0
  171. data/ext/pg_query/include/nodes/pg_list.h +605 -0
  172. data/ext/pg_query/include/nodes/plannodes.h +1251 -0
  173. data/ext/pg_query/include/nodes/primnodes.h +1541 -0
  174. data/ext/pg_query/include/nodes/print.h +34 -0
  175. data/ext/pg_query/include/nodes/tidbitmap.h +75 -0
  176. data/ext/pg_query/include/nodes/value.h +61 -0
  177. data/ext/pg_query/include/optimizer/cost.h +206 -0
  178. data/ext/pg_query/include/optimizer/geqo.h +88 -0
  179. data/ext/pg_query/include/optimizer/geqo_gene.h +45 -0
  180. data/ext/pg_query/include/optimizer/optimizer.h +199 -0
  181. data/ext/pg_query/include/optimizer/paths.h +249 -0
  182. data/ext/pg_query/include/optimizer/planmain.h +119 -0
  183. data/ext/pg_query/include/parser/analyze.h +49 -0
  184. data/ext/pg_query/include/parser/gram.h +1067 -0
  185. data/ext/pg_query/include/parser/gramparse.h +75 -0
  186. data/ext/pg_query/include/parser/kwlist.h +477 -0
  187. data/ext/pg_query/include/parser/parse_agg.h +68 -0
  188. data/ext/pg_query/include/parser/parse_clause.h +54 -0
  189. data/ext/pg_query/include/parser/parse_coerce.h +97 -0
  190. data/ext/pg_query/include/parser/parse_collate.h +27 -0
  191. data/ext/pg_query/include/parser/parse_expr.h +26 -0
  192. data/ext/pg_query/include/parser/parse_func.h +73 -0
  193. data/ext/pg_query/include/parser/parse_node.h +327 -0
  194. data/ext/pg_query/include/parser/parse_oper.h +67 -0
  195. data/ext/pg_query/include/parser/parse_relation.h +123 -0
  196. data/ext/pg_query/include/parser/parse_target.h +46 -0
  197. data/ext/pg_query/include/parser/parse_type.h +60 -0
  198. data/ext/pg_query/include/parser/parser.h +41 -0
  199. data/ext/pg_query/include/parser/parsetree.h +61 -0
  200. data/ext/pg_query/include/parser/scanner.h +152 -0
  201. data/ext/pg_query/include/parser/scansup.h +30 -0
  202. data/ext/pg_query/include/partitioning/partdefs.h +26 -0
  203. data/ext/pg_query/include/pg_config.h +988 -0
  204. data/ext/pg_query/include/pg_config_ext.h +8 -0
  205. data/ext/pg_query/include/pg_config_manual.h +350 -0
  206. data/ext/pg_query/include/pg_config_os.h +8 -0
  207. data/ext/pg_query/include/pg_getopt.h +56 -0
  208. data/ext/pg_query/include/pg_query.h +121 -0
  209. data/ext/pg_query/include/pg_query_enum_defs.c +2454 -0
  210. data/ext/pg_query/include/pg_query_fingerprint_conds.c +875 -0
  211. data/ext/pg_query/include/pg_query_fingerprint_defs.c +12413 -0
  212. data/ext/pg_query/include/pg_query_json_helper.c +61 -0
  213. data/ext/pg_query/include/pg_query_outfuncs_conds.c +686 -0
  214. data/ext/pg_query/include/pg_query_outfuncs_defs.c +2437 -0
  215. data/ext/pg_query/include/pg_query_readfuncs_conds.c +222 -0
  216. data/ext/pg_query/include/pg_query_readfuncs_defs.c +2878 -0
  217. data/ext/pg_query/include/pg_trace.h +17 -0
  218. data/ext/pg_query/include/pgstat.h +1487 -0
  219. data/ext/pg_query/include/pgtime.h +84 -0
  220. data/ext/pg_query/include/pl_gram.h +385 -0
  221. data/ext/pg_query/include/pl_reserved_kwlist.h +52 -0
  222. data/ext/pg_query/include/pl_reserved_kwlist_d.h +114 -0
  223. data/ext/pg_query/include/pl_unreserved_kwlist.h +112 -0
  224. data/ext/pg_query/include/pl_unreserved_kwlist_d.h +246 -0
  225. data/ext/pg_query/include/plerrcodes.h +990 -0
  226. data/ext/pg_query/include/plpgsql.h +1347 -0
  227. data/ext/pg_query/include/port.h +524 -0
  228. data/ext/pg_query/include/port/atomics.h +524 -0
  229. data/ext/pg_query/include/port/atomics/arch-arm.h +26 -0
  230. data/ext/pg_query/include/port/atomics/arch-ppc.h +254 -0
  231. data/ext/pg_query/include/port/atomics/arch-x86.h +252 -0
  232. data/ext/pg_query/include/port/atomics/fallback.h +170 -0
  233. data/ext/pg_query/include/port/atomics/generic-gcc.h +286 -0
  234. data/ext/pg_query/include/port/atomics/generic.h +401 -0
  235. data/ext/pg_query/include/port/pg_bitutils.h +226 -0
  236. data/ext/pg_query/include/port/pg_bswap.h +161 -0
  237. data/ext/pg_query/include/port/pg_crc32c.h +101 -0
  238. data/ext/pg_query/include/portability/instr_time.h +256 -0
  239. data/ext/pg_query/include/postgres.h +764 -0
  240. data/ext/pg_query/include/postgres_ext.h +74 -0
  241. data/ext/pg_query/include/postmaster/autovacuum.h +83 -0
  242. data/ext/pg_query/include/postmaster/bgworker.h +161 -0
  243. data/ext/pg_query/include/postmaster/bgworker_internals.h +64 -0
  244. data/ext/pg_query/include/postmaster/bgwriter.h +45 -0
  245. data/ext/pg_query/include/postmaster/fork_process.h +17 -0
  246. data/ext/pg_query/include/postmaster/interrupt.h +32 -0
  247. data/ext/pg_query/include/postmaster/pgarch.h +39 -0
  248. data/ext/pg_query/include/postmaster/postmaster.h +77 -0
  249. data/ext/pg_query/include/postmaster/syslogger.h +98 -0
  250. data/ext/pg_query/include/postmaster/walwriter.h +21 -0
  251. data/ext/pg_query/include/protobuf-c.h +1106 -0
  252. data/ext/pg_query/include/protobuf-c/protobuf-c.h +1106 -0
  253. data/ext/pg_query/include/protobuf/pg_query.pb-c.h +10846 -0
  254. data/ext/pg_query/include/protobuf/pg_query.pb.h +124718 -0
  255. data/ext/pg_query/include/regex/regex.h +184 -0
  256. data/ext/pg_query/include/replication/logicallauncher.h +31 -0
  257. data/ext/pg_query/include/replication/logicalproto.h +110 -0
  258. data/ext/pg_query/include/replication/logicalworker.h +19 -0
  259. data/ext/pg_query/include/replication/origin.h +73 -0
  260. data/ext/pg_query/include/replication/reorderbuffer.h +467 -0
  261. data/ext/pg_query/include/replication/slot.h +219 -0
  262. data/ext/pg_query/include/replication/syncrep.h +115 -0
  263. data/ext/pg_query/include/replication/walreceiver.h +340 -0
  264. data/ext/pg_query/include/replication/walsender.h +74 -0
  265. data/ext/pg_query/include/rewrite/prs2lock.h +46 -0
  266. data/ext/pg_query/include/rewrite/rewriteHandler.h +40 -0
  267. data/ext/pg_query/include/rewrite/rewriteManip.h +87 -0
  268. data/ext/pg_query/include/rewrite/rewriteSupport.h +26 -0
  269. data/ext/pg_query/include/storage/backendid.h +37 -0
  270. data/ext/pg_query/include/storage/block.h +121 -0
  271. data/ext/pg_query/include/storage/buf.h +46 -0
  272. data/ext/pg_query/include/storage/bufmgr.h +292 -0
  273. data/ext/pg_query/include/storage/bufpage.h +459 -0
  274. data/ext/pg_query/include/storage/condition_variable.h +62 -0
  275. data/ext/pg_query/include/storage/dsm.h +61 -0
  276. data/ext/pg_query/include/storage/dsm_impl.h +75 -0
  277. data/ext/pg_query/include/storage/fd.h +168 -0
  278. data/ext/pg_query/include/storage/ipc.h +81 -0
  279. data/ext/pg_query/include/storage/item.h +19 -0
  280. data/ext/pg_query/include/storage/itemid.h +184 -0
  281. data/ext/pg_query/include/storage/itemptr.h +206 -0
  282. data/ext/pg_query/include/storage/large_object.h +100 -0
  283. data/ext/pg_query/include/storage/latch.h +190 -0
  284. data/ext/pg_query/include/storage/lmgr.h +114 -0
  285. data/ext/pg_query/include/storage/lock.h +612 -0
  286. data/ext/pg_query/include/storage/lockdefs.h +59 -0
  287. data/ext/pg_query/include/storage/lwlock.h +232 -0
  288. data/ext/pg_query/include/storage/lwlocknames.h +51 -0
  289. data/ext/pg_query/include/storage/off.h +57 -0
  290. data/ext/pg_query/include/storage/pg_sema.h +61 -0
  291. data/ext/pg_query/include/storage/pg_shmem.h +90 -0
  292. data/ext/pg_query/include/storage/pmsignal.h +94 -0
  293. data/ext/pg_query/include/storage/predicate.h +87 -0
  294. data/ext/pg_query/include/storage/proc.h +333 -0
  295. data/ext/pg_query/include/storage/proclist_types.h +51 -0
  296. data/ext/pg_query/include/storage/procsignal.h +75 -0
  297. data/ext/pg_query/include/storage/relfilenode.h +99 -0
  298. data/ext/pg_query/include/storage/s_lock.h +1047 -0
  299. data/ext/pg_query/include/storage/sharedfileset.h +45 -0
  300. data/ext/pg_query/include/storage/shm_mq.h +85 -0
  301. data/ext/pg_query/include/storage/shm_toc.h +58 -0
  302. data/ext/pg_query/include/storage/shmem.h +81 -0
  303. data/ext/pg_query/include/storage/sinval.h +153 -0
  304. data/ext/pg_query/include/storage/sinvaladt.h +43 -0
  305. data/ext/pg_query/include/storage/smgr.h +109 -0
  306. data/ext/pg_query/include/storage/spin.h +77 -0
  307. data/ext/pg_query/include/storage/standby.h +91 -0
  308. data/ext/pg_query/include/storage/standbydefs.h +74 -0
  309. data/ext/pg_query/include/storage/sync.h +62 -0
  310. data/ext/pg_query/include/tcop/cmdtag.h +58 -0
  311. data/ext/pg_query/include/tcop/cmdtaglist.h +217 -0
  312. data/ext/pg_query/include/tcop/deparse_utility.h +108 -0
  313. data/ext/pg_query/include/tcop/dest.h +149 -0
  314. data/ext/pg_query/include/tcop/fastpath.h +21 -0
  315. data/ext/pg_query/include/tcop/pquery.h +45 -0
  316. data/ext/pg_query/include/tcop/tcopprot.h +89 -0
  317. data/ext/pg_query/include/tcop/utility.h +108 -0
  318. data/ext/pg_query/include/tsearch/ts_cache.h +98 -0
  319. data/ext/pg_query/include/utils/acl.h +312 -0
  320. data/ext/pg_query/include/utils/aclchk_internal.h +45 -0
  321. data/ext/pg_query/include/utils/array.h +458 -0
  322. data/ext/pg_query/include/utils/builtins.h +127 -0
  323. data/ext/pg_query/include/utils/bytea.h +27 -0
  324. data/ext/pg_query/include/utils/catcache.h +231 -0
  325. data/ext/pg_query/include/utils/date.h +90 -0
  326. data/ext/pg_query/include/utils/datetime.h +343 -0
  327. data/ext/pg_query/include/utils/datum.h +68 -0
  328. data/ext/pg_query/include/utils/dsa.h +123 -0
  329. data/ext/pg_query/include/utils/dynahash.h +19 -0
  330. data/ext/pg_query/include/utils/elog.h +439 -0
  331. data/ext/pg_query/include/utils/errcodes.h +352 -0
  332. data/ext/pg_query/include/utils/expandeddatum.h +159 -0
  333. data/ext/pg_query/include/utils/expandedrecord.h +231 -0
  334. data/ext/pg_query/include/utils/float.h +356 -0
  335. data/ext/pg_query/include/utils/fmgroids.h +2657 -0
  336. data/ext/pg_query/include/utils/fmgrprotos.h +2646 -0
  337. data/ext/pg_query/include/utils/fmgrtab.h +48 -0
  338. data/ext/pg_query/include/utils/guc.h +443 -0
  339. data/ext/pg_query/include/utils/guc_tables.h +272 -0
  340. data/ext/pg_query/include/utils/hsearch.h +149 -0
  341. data/ext/pg_query/include/utils/inval.h +64 -0
  342. data/ext/pg_query/include/utils/lsyscache.h +197 -0
  343. data/ext/pg_query/include/utils/memdebug.h +82 -0
  344. data/ext/pg_query/include/utils/memutils.h +225 -0
  345. data/ext/pg_query/include/utils/numeric.h +76 -0
  346. data/ext/pg_query/include/utils/palloc.h +136 -0
  347. data/ext/pg_query/include/utils/partcache.h +102 -0
  348. data/ext/pg_query/include/utils/pg_locale.h +119 -0
  349. data/ext/pg_query/include/utils/pg_lsn.h +29 -0
  350. data/ext/pg_query/include/utils/pidfile.h +56 -0
  351. data/ext/pg_query/include/utils/plancache.h +235 -0
  352. data/ext/pg_query/include/utils/portal.h +241 -0
  353. data/ext/pg_query/include/utils/probes.h +114 -0
  354. data/ext/pg_query/include/utils/ps_status.h +25 -0
  355. data/ext/pg_query/include/utils/queryenvironment.h +74 -0
  356. data/ext/pg_query/include/utils/regproc.h +28 -0
  357. data/ext/pg_query/include/utils/rel.h +644 -0
  358. data/ext/pg_query/include/utils/relcache.h +151 -0
  359. data/ext/pg_query/include/utils/reltrigger.h +81 -0
  360. data/ext/pg_query/include/utils/resowner.h +86 -0
  361. data/ext/pg_query/include/utils/rls.h +50 -0
  362. data/ext/pg_query/include/utils/ruleutils.h +44 -0
  363. data/ext/pg_query/include/utils/sharedtuplestore.h +61 -0
  364. data/ext/pg_query/include/utils/snapmgr.h +158 -0
  365. data/ext/pg_query/include/utils/snapshot.h +206 -0
  366. data/ext/pg_query/include/utils/sortsupport.h +276 -0
  367. data/ext/pg_query/include/utils/syscache.h +219 -0
  368. data/ext/pg_query/include/utils/timeout.h +88 -0
  369. data/ext/pg_query/include/utils/timestamp.h +116 -0
  370. data/ext/pg_query/include/utils/tuplesort.h +277 -0
  371. data/ext/pg_query/include/utils/tuplestore.h +91 -0
  372. data/ext/pg_query/include/utils/typcache.h +202 -0
  373. data/ext/pg_query/include/utils/tzparser.h +39 -0
  374. data/ext/pg_query/include/utils/varlena.h +39 -0
  375. data/ext/pg_query/include/utils/xml.h +84 -0
  376. data/ext/pg_query/include/xxhash.h +5445 -0
  377. data/ext/pg_query/include/xxhash/xxhash.h +5445 -0
  378. data/ext/pg_query/pg_query.c +104 -0
  379. data/ext/pg_query/pg_query.pb-c.c +37628 -0
  380. data/ext/pg_query/pg_query_deparse.c +9953 -0
  381. data/ext/pg_query/pg_query_fingerprint.c +292 -0
  382. data/ext/pg_query/pg_query_fingerprint.h +8 -0
  383. data/ext/pg_query/pg_query_internal.h +24 -0
  384. data/ext/pg_query/pg_query_json_plpgsql.c +738 -0
  385. data/ext/pg_query/pg_query_json_plpgsql.h +9 -0
  386. data/ext/pg_query/pg_query_normalize.c +437 -0
  387. data/ext/pg_query/pg_query_outfuncs.h +10 -0
  388. data/ext/pg_query/pg_query_outfuncs_json.c +297 -0
  389. data/ext/pg_query/pg_query_outfuncs_protobuf.c +237 -0
  390. data/ext/pg_query/pg_query_parse.c +148 -0
  391. data/ext/pg_query/pg_query_parse_plpgsql.c +460 -0
  392. data/ext/pg_query/pg_query_readfuncs.h +11 -0
  393. data/ext/pg_query/pg_query_readfuncs_protobuf.c +142 -0
  394. data/ext/pg_query/pg_query_ruby.c +108 -12
  395. data/ext/pg_query/pg_query_scan.c +173 -0
  396. data/ext/pg_query/pg_query_split.c +221 -0
  397. data/ext/pg_query/protobuf-c.c +3660 -0
  398. data/ext/pg_query/src_backend_catalog_namespace.c +1051 -0
  399. data/ext/pg_query/src_backend_catalog_pg_proc.c +142 -0
  400. data/ext/pg_query/src_backend_commands_define.c +117 -0
  401. data/ext/pg_query/src_backend_libpq_pqcomm.c +651 -0
  402. data/ext/pg_query/src_backend_nodes_bitmapset.c +513 -0
  403. data/ext/pg_query/src_backend_nodes_copyfuncs.c +6013 -0
  404. data/ext/pg_query/src_backend_nodes_equalfuncs.c +4003 -0
  405. data/ext/pg_query/src_backend_nodes_extensible.c +99 -0
  406. data/ext/pg_query/src_backend_nodes_list.c +922 -0
  407. data/ext/pg_query/src_backend_nodes_makefuncs.c +417 -0
  408. data/ext/pg_query/src_backend_nodes_nodeFuncs.c +1363 -0
  409. data/ext/pg_query/src_backend_nodes_value.c +84 -0
  410. data/ext/pg_query/src_backend_parser_gram.c +47456 -0
  411. data/ext/pg_query/src_backend_parser_parse_expr.c +313 -0
  412. data/ext/pg_query/src_backend_parser_parser.c +497 -0
  413. data/ext/pg_query/src_backend_parser_scan.c +7091 -0
  414. data/ext/pg_query/src_backend_parser_scansup.c +160 -0
  415. data/ext/pg_query/src_backend_postmaster_postmaster.c +2230 -0
  416. data/ext/pg_query/src_backend_storage_ipc_ipc.c +192 -0
  417. data/ext/pg_query/src_backend_storage_lmgr_s_lock.c +370 -0
  418. data/ext/pg_query/src_backend_tcop_postgres.c +776 -0
  419. data/ext/pg_query/src_backend_utils_adt_datum.c +326 -0
  420. data/ext/pg_query/src_backend_utils_adt_expandeddatum.c +98 -0
  421. data/ext/pg_query/src_backend_utils_adt_format_type.c +136 -0
  422. data/ext/pg_query/src_backend_utils_adt_ruleutils.c +1683 -0
  423. data/ext/pg_query/src_backend_utils_error_assert.c +74 -0
  424. data/ext/pg_query/src_backend_utils_error_elog.c +1748 -0
  425. data/ext/pg_query/src_backend_utils_fmgr_fmgr.c +570 -0
  426. data/ext/pg_query/src_backend_utils_hash_dynahash.c +1086 -0
  427. data/ext/pg_query/src_backend_utils_init_globals.c +168 -0
  428. data/ext/pg_query/src_backend_utils_mb_mbutils.c +839 -0
  429. data/ext/pg_query/src_backend_utils_misc_guc.c +1831 -0
  430. data/ext/pg_query/src_backend_utils_mmgr_aset.c +1560 -0
  431. data/ext/pg_query/src_backend_utils_mmgr_mcxt.c +1006 -0
  432. data/ext/pg_query/src_common_encnames.c +158 -0
  433. data/ext/pg_query/src_common_keywords.c +39 -0
  434. data/ext/pg_query/src_common_kwlist_d.h +1081 -0
  435. data/ext/pg_query/src_common_kwlookup.c +91 -0
  436. data/ext/pg_query/src_common_psprintf.c +158 -0
  437. data/ext/pg_query/src_common_string.c +86 -0
  438. data/ext/pg_query/src_common_stringinfo.c +336 -0
  439. data/ext/pg_query/src_common_wchar.c +1651 -0
  440. data/ext/pg_query/src_pl_plpgsql_src_pl_comp.c +1133 -0
  441. data/ext/pg_query/src_pl_plpgsql_src_pl_funcs.c +877 -0
  442. data/ext/pg_query/src_pl_plpgsql_src_pl_gram.c +6533 -0
  443. data/ext/pg_query/src_pl_plpgsql_src_pl_handler.c +107 -0
  444. data/ext/pg_query/src_pl_plpgsql_src_pl_reserved_kwlist_d.h +123 -0
  445. data/ext/pg_query/src_pl_plpgsql_src_pl_scanner.c +671 -0
  446. data/ext/pg_query/src_pl_plpgsql_src_pl_unreserved_kwlist_d.h +255 -0
  447. data/ext/pg_query/src_port_erand48.c +127 -0
  448. data/ext/pg_query/src_port_pg_bitutils.c +246 -0
  449. data/ext/pg_query/src_port_pgsleep.c +69 -0
  450. data/ext/pg_query/src_port_pgstrcasecmp.c +83 -0
  451. data/ext/pg_query/src_port_qsort.c +240 -0
  452. data/ext/pg_query/src_port_random.c +31 -0
  453. data/ext/pg_query/src_port_snprintf.c +1449 -0
  454. data/ext/pg_query/src_port_strerror.c +324 -0
  455. data/ext/pg_query/src_port_strnlen.c +39 -0
  456. data/ext/pg_query/xxhash.c +43 -0
  457. data/lib/pg_query.rb +7 -4
  458. data/lib/pg_query/constants.rb +21 -0
  459. data/lib/pg_query/deparse.rb +16 -1117
  460. data/lib/pg_query/filter_columns.rb +86 -85
  461. data/lib/pg_query/fingerprint.rb +122 -87
  462. data/lib/pg_query/json_field_names.rb +1402 -0
  463. data/lib/pg_query/node.rb +31 -0
  464. data/lib/pg_query/param_refs.rb +42 -37
  465. data/lib/pg_query/parse.rb +220 -200
  466. data/lib/pg_query/parse_error.rb +1 -1
  467. data/lib/pg_query/pg_query_pb.rb +3211 -0
  468. data/lib/pg_query/scan.rb +23 -0
  469. data/lib/pg_query/treewalker.rb +24 -40
  470. data/lib/pg_query/truncate.rb +64 -43
  471. data/lib/pg_query/version.rb +2 -2
  472. metadata +473 -11
  473. data/ext/pg_query/pg_query_ruby.h +0 -10
  474. data/lib/pg_query/deep_dup.rb +0 -16
  475. data/lib/pg_query/deparse/alter_table.rb +0 -42
  476. data/lib/pg_query/deparse/interval.rb +0 -105
  477. data/lib/pg_query/legacy_parsetree.rb +0 -109
  478. data/lib/pg_query/node_types.rb +0 -284
data/ext/pg_query/src_backend_utils_mmgr_aset.c
@@ -0,0 +1,1560 @@
+ /*--------------------------------------------------------------------
+ * Symbols referenced in this file:
+ * - AllocSetContextCreateInternal
+ * - context_freelists
+ * - AllocSetMethods
+ * - AllocSetAlloc
+ * - AllocSetFreeIndex
+ * - AllocSetFree
+ * - AllocSetRealloc
+ * - AllocSetReset
+ * - AllocSetDelete
+ * - AllocSetGetChunkSpace
+ * - AllocSetIsEmpty
+ * - AllocSetStats
+ * - AllocSetCheck
+ * - AllocSetDeleteFreeList
+ *--------------------------------------------------------------------
+ */
+
+ /*-------------------------------------------------------------------------
+ *
+ * aset.c
+ * Allocation set definitions.
+ *
+ * AllocSet is our standard implementation of the abstract MemoryContext
+ * type.
+ *
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/utils/mmgr/aset.c
+ *
+ * NOTE:
+ * This is a new (Feb. 05, 1999) implementation of the allocation set
+ * routines. AllocSet...() does not use OrderedSet...() any more.
+ * Instead it manages allocations in a block pool by itself, combining
+ * many small allocations in a few bigger blocks. AllocSetFree() normally
+ * doesn't free() memory really. It just add's the free'd area to some
+ * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
+ * at once on AllocSetReset(), which happens when the memory context gets
+ * destroyed.
+ * Jan Wieck
+ *
+ * Performance improvement from Tom Lane, 8/99: for extremely large request
+ * sizes, we do want to be able to give the memory back to free() as soon
+ * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
+ * freelist entries that might never be usable. This is specially needed
+ * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
+ * the previous instances of the block were guaranteed to be wasted until
+ * AllocSetReset() under the old way.
+ *
+ * Further improvement 12/00: as the code stood, request sizes in the
+ * midrange between "small" and "large" were handled very inefficiently,
+ * because any sufficiently large free chunk would be used to satisfy a
+ * request, even if it was much larger than necessary. This led to more
+ * and more wasted space in allocated chunks over time. To fix, get rid
+ * of the midrange behavior: we now handle only "small" power-of-2-size
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * the number of freelists to change the small/large boundary.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+ #include "postgres.h"
+
+ #include "port/pg_bitutils.h"
+ #include "utils/memdebug.h"
+ #include "utils/memutils.h"
+
+ /*--------------------
+ * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
+ * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
+ *
+ * Note that all chunks in the freelists have power-of-2 sizes. This
+ * improves recyclability: we may waste some space, but the wasted space
+ * should stay pretty constant as requests are made and released.
+ *
+ * A request too large for the last freelist is handled by allocating a
+ * dedicated block from malloc(). The block still has a block header and
+ * chunk header, but when the chunk is freed we'll return the whole block
+ * to malloc(), not put it on our freelists.
+ *
+ * CAUTION: ALLOC_MINBITS must be large enough so that
+ * 1<<ALLOC_MINBITS is at least MAXALIGN,
+ * or we may fail to align the smallest chunks adequately.
+ * 8-byte alignment is enough on all currently known machines.
+ *
+ * With the current parameters, request sizes up to 8K are treated as chunks,
+ * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
+ * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
+ * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
+ * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
+ *--------------------
+ */
+
+ #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
+ #define ALLOCSET_NUM_FREELISTS 11
+ #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
+ /* Size of largest chunk that we use a fixed size for */
+ #define ALLOC_CHUNK_FRACTION 4
+ /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
+
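The freelist geometry described in the comment above is easy to tabulate. The following standalone sketch is not part of the vendored file or the gem; it simply re-derives the numbers from the two #defines, printing the chunk size served by each freelist and the resulting 8 KB small/large boundary:

    /* Illustration only: chunk sizes implied by ALLOC_MINBITS and
     * ALLOCSET_NUM_FREELISTS.  Freelist k holds chunks of 1 << (k + ALLOC_MINBITS)
     * bytes, so with the values above the sizes run 8, 16, ... 8192 bytes, and
     * anything larger becomes a dedicated malloc'd block. */
    #include <stdio.h>

    #define ALLOC_MINBITS 3
    #define ALLOCSET_NUM_FREELISTS 11
    #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS - 1 + ALLOC_MINBITS))

    int main(void)
    {
        for (int k = 0; k < ALLOCSET_NUM_FREELISTS; k++)
            printf("freelist %2d -> %5d-byte chunks\n", k, 1 << (k + ALLOC_MINBITS));
        printf("chunk limit: %d bytes\n", ALLOC_CHUNK_LIMIT); /* prints 8192 */
        return 0;
    }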
+ /*--------------------
+ * The first block allocated for an allocset has size initBlockSize.
+ * Each time we have to allocate another block, we double the block size
+ * (if possible, and without exceeding maxBlockSize), so as to reduce
+ * the bookkeeping load on malloc().
+ *
+ * Blocks allocated to hold oversize chunks do not follow this rule, however;
+ * they are just however big they need to be to hold that single chunk.
+ *
+ * Also, if a minContextSize is specified, the first block has that size,
+ * and then initBlockSize is used for the next one.
+ *--------------------
+ */
+
+ #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
+ #define ALLOC_CHUNKHDRSZ sizeof(struct AllocChunkData)
+
+ typedef struct AllocBlockData *AllocBlock; /* forward reference */
+ typedef struct AllocChunkData *AllocChunk;
+
+ /*
+ * AllocPointer
+ * Aligned pointer which may be a member of an allocation set.
+ */
+ typedef void *AllocPointer;
+
+ /*
+ * AllocSetContext is our standard implementation of MemoryContext.
+ *
+ * Note: header.isReset means there is nothing for AllocSetReset to do.
+ * This is different from the aset being physically empty (empty blocks list)
+ * because we will still have a keeper block. It's also different from the set
+ * being logically empty, because we don't attempt to detect pfree'ing the
+ * last active chunk.
+ */
+ typedef struct AllocSetContext
+ {
+ MemoryContextData header; /* Standard memory-context fields */
+ /* Info about storage allocated in this context: */
+ AllocBlock blocks; /* head of list of blocks in this set */
+ AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
+ /* Allocation parameters for this context: */
+ Size initBlockSize; /* initial block size */
+ Size maxBlockSize; /* maximum block size */
+ Size nextBlockSize; /* next block size to allocate */
+ Size allocChunkLimit; /* effective chunk size limit */
+ AllocBlock keeper; /* keep this block over resets */
+ /* freelist this context could be put in, or -1 if not a candidate: */
+ int freeListIndex; /* index in context_freelists[], or -1 */
+ } AllocSetContext;
+
+ typedef AllocSetContext *AllocSet;
+
+ /*
+ * AllocBlock
+ * An AllocBlock is the unit of memory that is obtained by aset.c
+ * from malloc(). It contains one or more AllocChunks, which are
+ * the units requested by palloc() and freed by pfree(). AllocChunks
+ * cannot be returned to malloc() individually, instead they are put
+ * on freelists by pfree() and re-used by the next palloc() that has
+ * a matching request size.
+ *
+ * AllocBlockData is the header data for a block --- the usable space
+ * within the block begins at the next alignment boundary.
+ */
+ typedef struct AllocBlockData
+ {
+ AllocSet aset; /* aset that owns this block */
+ AllocBlock prev; /* prev block in aset's blocks list, if any */
+ AllocBlock next; /* next block in aset's blocks list, if any */
+ char *freeptr; /* start of free space in this block */
+ char *endptr; /* end of space in this block */
+ } AllocBlockData;
+
+ /*
+ * AllocChunk
+ * The prefix of each piece of memory in an AllocBlock
+ *
+ * Note: to meet the memory context APIs, the payload area of the chunk must
+ * be maxaligned, and the "aset" link must be immediately adjacent to the
+ * payload area (cf. GetMemoryChunkContext). We simplify matters for this
+ * module by requiring sizeof(AllocChunkData) to be maxaligned, and then
+ * we can ensure things work by adding any required alignment padding before
+ * the "aset" field. There is a static assertion below that the alignment
+ * is done correctly.
+ */
+ typedef struct AllocChunkData
+ {
+ /* size is always the size of the usable space in the chunk */
+ Size size;
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* when debugging memory usage, also store actual requested size */
+ /* this is zero in a free chunk */
+ Size requested_size;
+
+ #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T * 2 + SIZEOF_VOID_P)
+ #else
+ #define ALLOCCHUNK_RAWSIZE (SIZEOF_SIZE_T + SIZEOF_VOID_P)
+ #endif /* MEMORY_CONTEXT_CHECKING */
+
+ /* ensure proper alignment by adding padding if needed */
+ #if (ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF) != 0
+ char padding[MAXIMUM_ALIGNOF - ALLOCCHUNK_RAWSIZE % MAXIMUM_ALIGNOF];
+ #endif
+
+ /* aset is the owning aset if allocated, or the freelist link if free */
+ void *aset;
+ /* there must not be any padding to reach a MAXALIGN boundary here! */
+ } AllocChunkData;
+
+ /*
+ * Only the "aset" field should be accessed outside this module.
+ * We keep the rest of an allocated chunk's header marked NOACCESS when using
+ * valgrind. But note that chunk headers that are in a freelist are kept
+ * accessible, for simplicity.
+ */
+ #define ALLOCCHUNK_PRIVATE_LEN offsetof(AllocChunkData, aset)
+
+ /*
+ * AllocPointerIsValid
+ * True iff pointer is valid allocation pointer.
+ */
+ #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
+
+ /*
+ * AllocSetIsValid
+ * True iff set is valid allocation set.
+ */
+ #define AllocSetIsValid(set) PointerIsValid(set)
+
+ #define AllocPointerGetChunk(ptr) \
+ ((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
+ #define AllocChunkGetPointer(chk) \
+ ((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))
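These two macros are inverses: the address handed back to palloc() callers sits exactly ALLOC_CHUNKHDRSZ bytes past the chunk header, so pfree() can step back from a user pointer to the header (and from there to the owning context via the adjacent aset field, as the GetMemoryChunkContext note above explains). A minimal standalone sketch of that round trip, using a stand-in DemoChunk header rather than the real AllocChunkData:

    #include <assert.h>
    #include <stddef.h>

    typedef struct DemoChunk { size_t size; void *aset; } DemoChunk; /* stand-in header */

    #define DEMO_CHUNKHDRSZ sizeof(DemoChunk)
    #define DemoChunkGetPointer(chk) ((void *) (((char *) (chk)) + DEMO_CHUNKHDRSZ))
    #define DemoPointerGetChunk(ptr) ((DemoChunk *) (((char *) (ptr)) - DEMO_CHUNKHDRSZ))

    int main(void)
    {
        char block[64];
        DemoChunk *chunk = (DemoChunk *) block;     /* header at the front of the space */
        void *user = DemoChunkGetPointer(chunk);    /* what the caller would see */

        assert(DemoPointerGetChunk(user) == chunk); /* freeing finds the header again */
        return 0;
    }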
+
+ /*
+ * Rather than repeatedly creating and deleting memory contexts, we keep some
+ * freed contexts in freelists so that we can hand them out again with little
+ * work. Before putting a context in a freelist, we reset it so that it has
+ * only its initial malloc chunk and no others. To be a candidate for a
+ * freelist, a context must have the same minContextSize/initBlockSize as
+ * other contexts in the list; but its maxBlockSize is irrelevant since that
+ * doesn't affect the size of the initial chunk.
+ *
+ * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
+ * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
+ * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
+ *
+ * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
+ * hopes of improving locality of reference. But if there get to be too
+ * many contexts in the list, we'd prefer to drop the most-recently-created
+ * contexts in hopes of keeping the process memory map compact.
+ * We approximate that by simply deleting all existing entries when the list
+ * overflows, on the assumption that queries that allocate a lot of contexts
+ * will probably free them in more or less reverse order of allocation.
+ *
+ * Contexts in a freelist are chained via their nextchild pointers.
+ */
+ #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
+
+ typedef struct AllocSetFreeList
+ {
+ int num_free; /* current list length */
+ AllocSetContext *first_free; /* list header */
+ } AllocSetFreeList;
+
+ /* context_freelists[0] is for default params, [1] for small params */
+ static __thread AllocSetFreeList context_freelists[2] =
+ {
+ {
+ 0, NULL
+ },
+ {
+ 0, NULL
+ }
+ };
+
+
+ /*
+ * These functions implement the MemoryContext API for AllocSet contexts.
+ */
+ static void *AllocSetAlloc(MemoryContext context, Size size);
+ static void AllocSetFree(MemoryContext context, void *pointer);
+ static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
+ static void AllocSetReset(MemoryContext context);
+ static void AllocSetDelete(MemoryContext context);
+ static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
+ static bool AllocSetIsEmpty(MemoryContext context);
+ static void AllocSetStats(MemoryContext context,
+ MemoryStatsPrintFunc printfunc, void *passthru,
+ MemoryContextCounters *totals);
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ static void AllocSetCheck(MemoryContext context);
+ #endif
+
+ /*
+ * This is the virtual function table for AllocSet contexts.
+ */
+ static const MemoryContextMethods AllocSetMethods = {
+ AllocSetAlloc,
+ AllocSetFree,
+ AllocSetRealloc,
+ AllocSetReset,
+ AllocSetDelete,
+ AllocSetGetChunkSpace,
+ AllocSetIsEmpty,
+ AllocSetStats
+ #ifdef MEMORY_CONTEXT_CHECKING
+ ,AllocSetCheck
+ #endif
+ };
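This table is what keeps the generic memory-context layer (mcxt.c) ignorant of aset.c internals: the public entry points look up the context's methods pointer and dispatch through it, and AllocSet contexts install AllocSetMethods so the call lands in the functions below. A rough, simplified sketch of that dispatch pattern (the real mcxt.c code also handles accounting, error reporting, and valgrind bookkeeping; the function name here is invented for illustration):

    /* Simplified sketch of the mcxt.c dispatch, not the actual implementation. */
    static void *
    MemoryContextAllocSketch(MemoryContext context, Size size)
    {
        /* each context type supplies its own vtable; for AllocSet contexts
         * this call ends up in AllocSetAlloc() below */
        return context->methods->alloc(context, size);
    }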
+
+
+ /* ----------
+ * AllocSetFreeIndex -
+ *
+ * Depending on the size of an allocation compute which freechunk
+ * list of the alloc set it belongs to. Caller must have verified
+ * that size <= ALLOC_CHUNK_LIMIT.
+ * ----------
+ */
+ static inline int
+ AllocSetFreeIndex(Size size)
+ {
+ int idx;
+
+ if (size > (1 << ALLOC_MINBITS))
+ {
+ /*----------
+ * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
+ * This is the same as
+ * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
+ * or equivalently
+ * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
+ *
+ * However, rather than just calling that function, we duplicate the
+ * logic here, allowing an additional optimization. It's reasonable
+ * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
+ * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
+ * the last two bytes.
+ *
+ * Yes, this function is enough of a hot-spot to make it worth this
+ * much trouble.
+ *----------
+ */
+ #ifdef HAVE__BUILTIN_CLZ
+ idx = 31 - __builtin_clz((uint32) size - 1) - ALLOC_MINBITS + 1;
+ #else
+ uint32 t,
+ tsize;
+
+ /* Statically assert that we only have a 16-bit input value. */
+ StaticAssertStmt(ALLOC_CHUNK_LIMIT < (1 << 16),
+ "ALLOC_CHUNK_LIMIT must be less than 64kB");
+
+ tsize = size - 1;
+ t = tsize >> 8;
+ idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
+ idx -= ALLOC_MINBITS - 1;
+ #endif
+
+ Assert(idx < ALLOCSET_NUM_FREELISTS);
+ }
+ else
+ idx = 0;
+
+ return idx;
+ }
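As a concrete example of the arithmetic above, take a 100-byte request: size - 1 = 99, whose highest set bit is bit 6, so idx = 6 - ALLOC_MINBITS + 1 = 4, and the request is served from the 1 << (4 + 3) = 128-byte freelist. The standalone check below re-implements the computation portably (demo_free_index is an illustrative stand-in, not the vendored function; it uses a plain shift loop instead of pg_leftmost_one_pos32 or __builtin_clz but computes the same index):

    #include <assert.h>
    #include <stdint.h>

    #define ALLOC_MINBITS 3

    /* Stand-in for AllocSetFreeIndex(): ceil(log2(size >> ALLOC_MINBITS)). */
    static int demo_free_index(uint32_t size)
    {
        if (size <= (1 << ALLOC_MINBITS))
            return 0;
        int hi = 0;
        for (uint32_t t = (size - 1) >> 1; t != 0; t >>= 1)
            hi++;                      /* hi = position of highest set bit of size-1 */
        return hi - ALLOC_MINBITS + 1; /* same as 31 - clz(size-1) - ALLOC_MINBITS + 1 */
    }

    int main(void)
    {
        assert(demo_free_index(8) == 0);     /* fits the 8-byte freelist */
        assert(demo_free_index(9) == 1);     /* rounds up to 16 bytes */
        assert(demo_free_index(100) == 4);   /* rounds up to 128 bytes */
        assert(demo_free_index(8192) == 10); /* largest chunk-sized request */
        return 0;
    }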
+
+
+ /*
+ * Public routines
+ */
+
+
+ /*
+ * AllocSetContextCreateInternal
+ * Create a new AllocSet context.
+ *
+ * parent: parent context, or NULL if top-level context
+ * name: name of context (must be statically allocated)
+ * minContextSize: minimum context size
+ * initBlockSize: initial allocation block size
+ * maxBlockSize: maximum allocation block size
+ *
+ * Most callers should abstract the context size parameters using a macro
+ * such as ALLOCSET_DEFAULT_SIZES.
+ *
+ * Note: don't call this directly; go through the wrapper macro
+ * AllocSetContextCreate.
+ */
+ MemoryContext
+ AllocSetContextCreateInternal(MemoryContext parent,
+ const char *name,
+ Size minContextSize,
+ Size initBlockSize,
+ Size maxBlockSize)
+ {
+ int freeListIndex;
+ Size firstBlockSize;
+ AllocSet set;
+ AllocBlock block;
+
+ /* Assert we padded AllocChunkData properly */
+ StaticAssertStmt(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
+ "sizeof(AllocChunkData) is not maxaligned");
+ StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
+ ALLOC_CHUNKHDRSZ,
+ "padding calculation in AllocChunkData is wrong");
+
+ /*
+ * First, validate allocation parameters. Once these were regular runtime
+ * test and elog's, but in practice Asserts seem sufficient because nobody
+ * varies their parameters at runtime. We somewhat arbitrarily enforce a
+ * minimum 1K block size.
+ */
+ Assert(initBlockSize == MAXALIGN(initBlockSize) &&
+ initBlockSize >= 1024);
+ Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
+ maxBlockSize >= initBlockSize &&
+ AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
+ Assert(minContextSize == 0 ||
+ (minContextSize == MAXALIGN(minContextSize) &&
+ minContextSize >= 1024 &&
+ minContextSize <= maxBlockSize));
+
+ /*
+ * Check whether the parameters match either available freelist. We do
+ * not need to demand a match of maxBlockSize.
+ */
+ if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
+ initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
+ freeListIndex = 0;
+ else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
+ initBlockSize == ALLOCSET_SMALL_INITSIZE)
+ freeListIndex = 1;
+ else
+ freeListIndex = -1;
+
+ /*
+ * If a suitable freelist entry exists, just recycle that context.
+ */
+ if (freeListIndex >= 0)
+ {
+ AllocSetFreeList *freelist = &context_freelists[freeListIndex];
+
+ if (freelist->first_free != NULL)
+ {
+ /* Remove entry from freelist */
+ set = freelist->first_free;
+ freelist->first_free = (AllocSet) set->header.nextchild;
+ freelist->num_free--;
+
+ /* Update its maxBlockSize; everything else should be OK */
+ set->maxBlockSize = maxBlockSize;
+
+ /* Reinitialize its header, installing correct name and parent */
+ MemoryContextCreate((MemoryContext) set,
+ T_AllocSetContext,
+ &AllocSetMethods,
+ parent,
+ name);
+
+ ((MemoryContext) set)->mem_allocated =
+ set->keeper->endptr - ((char *) set);
+
+ return (MemoryContext) set;
+ }
+ }
+
+ /* Determine size of initial block */
+ firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
+ ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
+ if (minContextSize != 0)
+ firstBlockSize = Max(firstBlockSize, minContextSize);
+ else
+ firstBlockSize = Max(firstBlockSize, initBlockSize);
+
+ /*
+ * Allocate the initial block. Unlike other aset.c blocks, it starts with
+ * the context header and its block header follows that.
+ */
+ set = (AllocSet) malloc(firstBlockSize);
+ if (set == NULL)
+ {
+ if (TopMemoryContext)
+ MemoryContextStats(TopMemoryContext);
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("Failed while creating memory context \"%s\".",
+ name)));
+ }
+
+ /*
+ * Avoid writing code that can fail between here and MemoryContextCreate;
+ * we'd leak the header/initial block if we ereport in this stretch.
+ */
+
+ /* Fill in the initial block's block header */
+ block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
+ block->aset = set;
+ block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
+ block->endptr = ((char *) set) + firstBlockSize;
+ block->prev = NULL;
+ block->next = NULL;
+
+ /* Mark unallocated space NOACCESS; leave the block header alone. */
+ VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
+
+ /* Remember block as part of block list */
+ set->blocks = block;
+ /* Mark block as not to be released at reset time */
+ set->keeper = block;
+
+ /* Finish filling in aset-specific parts of the context header */
+ MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+
+ set->initBlockSize = initBlockSize;
+ set->maxBlockSize = maxBlockSize;
+ set->nextBlockSize = initBlockSize;
+ set->freeListIndex = freeListIndex;
+
+ /*
+ * Compute the allocation chunk size limit for this context. It can't be
+ * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
+ * If maxBlockSize is small then requests exceeding the maxBlockSize, or
+ * even a significant fraction of it, should be treated as large chunks
+ * too. For the typical case of maxBlockSize a power of 2, the chunk size
+ * limit will be at most 1/8th maxBlockSize, so that given a stream of
+ * requests that are all the maximum chunk size we will waste at most
+ * 1/8th of the allocated space.
+ *
+ * We have to have allocChunkLimit a power of two, because the requested
+ * and actually-allocated sizes of any chunk must be on the same side of
+ * the limit, else we get confused about whether the chunk is "big".
+ *
+ * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
+ */
+ StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
+ "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
+
+ set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
+ while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
+ (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
+ set->allocChunkLimit >>= 1;
+
+ /* Finally, do the type-independent part of context creation */
+ MemoryContextCreate((MemoryContext) set,
+ T_AllocSetContext,
+ &AllocSetMethods,
+ parent,
+ name);
+
+ ((MemoryContext) set)->mem_allocated = firstBlockSize;
+
+ return (MemoryContext) set;
+ }
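For context, this is how backend code typically reaches the function above: the AllocSetContextCreate wrapper macro (declared in utils/memutils.h) expands to AllocSetContextCreateInternal, and ALLOCSET_DEFAULT_SIZES supplies the three size parameters so the context-freelist fast path can match them. A hedged usage sketch, as it would appear in PostgreSQL backend or extension code rather than in this gem (demo_use_context is a hypothetical helper used only for illustration):

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Hypothetical helper, shown only to illustrate the call sequence. */
    static void
    demo_use_context(void)
    {
        MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                                  "demo context",
                                                  ALLOCSET_DEFAULT_SIZES);
        MemoryContext old = MemoryContextSwitchTo(cxt);

        char *buf = palloc(100);      /* served from the 128-byte freelist */

        (void) buf;
        MemoryContextSwitchTo(old);
        MemoryContextDelete(cxt);     /* a default-sized context goes back on context_freelists[0] */
    }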
+
+ /*
+ * AllocSetReset
+ * Frees all memory which is allocated in the given set.
+ *
+ * Actually, this routine has some discretion about what to do.
+ * It should mark all allocated chunks freed, but it need not necessarily
+ * give back all the resources the set owns. Our actual implementation is
+ * that we give back all but the "keeper" block (which we must keep, since
+ * it shares a malloc chunk with the context header). In this way, we don't
+ * thrash malloc() when a context is repeatedly reset after small allocations,
+ * which is typical behavior for per-tuple contexts.
+ */
+ static void
+ AllocSetReset(MemoryContext context)
+ {
+ AllocSet set = (AllocSet) context;
+ AllocBlock block;
+ Size keepersize PG_USED_FOR_ASSERTS_ONLY
+ = set->keeper->endptr - ((char *) set);
+
+ AssertArg(AllocSetIsValid(set));
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* Check for corruption and leaks before freeing */
+ AllocSetCheck(context);
+ #endif
+
+ /* Clear chunk freelists */
+ MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+
+ block = set->blocks;
+
+ /* New blocks list will be just the keeper block */
+ set->blocks = set->keeper;
+
+ while (block != NULL)
+ {
+ AllocBlock next = block->next;
+
+ if (block == set->keeper)
+ {
+ /* Reset the block, but don't return it to malloc */
+ char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
+
+ #ifdef CLOBBER_FREED_MEMORY
+ wipe_mem(datastart, block->freeptr - datastart);
+ #else
+ /* wipe_mem() would have done this */
+ VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
+ #endif
+ block->freeptr = datastart;
+ block->prev = NULL;
+ block->next = NULL;
+ }
+ else
+ {
+ /* Normal case, release the block */
+ context->mem_allocated -= block->endptr - ((char *) block);
+
+ #ifdef CLOBBER_FREED_MEMORY
+ wipe_mem(block, block->freeptr - ((char *) block));
+ #endif
+ free(block);
+ }
+ block = next;
+ }
+
+ Assert(context->mem_allocated == keepersize);
+
+ /* Reset block size allocation sequence, too */
+ set->nextBlockSize = set->initBlockSize;
+ }
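The "per-tuple context" behavior the comment refers to looks roughly like this in backend code: allocate freely while processing one row, then reset the context before the next row. Because everything but the keeper block is given back here, the loop keeps reusing the same initial block instead of hammering malloc(). A hedged sketch (demo_per_tuple_loop and the loop shape are illustrative only; MemoryContextReset is the real public entry point that ends up in AllocSetReset() above):

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Illustrative only: the shape of a per-tuple work loop. */
    static void
    demo_per_tuple_loop(MemoryContext per_tuple_cxt, int ntuples)
    {
        for (int i = 0; i < ntuples; i++)
        {
            MemoryContext old = MemoryContextSwitchTo(per_tuple_cxt);

            /* scratch allocations that live only for this tuple */
            char *scratch = palloc(64);
            (void) scratch;

            MemoryContextSwitchTo(old);
            MemoryContextReset(per_tuple_cxt); /* dispatches to AllocSetReset() */
        }
    }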
+
+ /*
+ * AllocSetDelete
+ * Frees all memory which is allocated in the given set,
+ * in preparation for deletion of the set.
+ *
+ * Unlike AllocSetReset, this *must* free all resources of the set.
+ */
+ static void
+ AllocSetDelete(MemoryContext context)
+ {
+ AllocSet set = (AllocSet) context;
+ AllocBlock block = set->blocks;
+ Size keepersize PG_USED_FOR_ASSERTS_ONLY
+ = set->keeper->endptr - ((char *) set);
+
+ AssertArg(AllocSetIsValid(set));
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* Check for corruption and leaks before freeing */
+ AllocSetCheck(context);
+ #endif
+
+ /*
+ * If the context is a candidate for a freelist, put it into that freelist
+ * instead of destroying it.
+ */
+ if (set->freeListIndex >= 0)
+ {
+ AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
+
+ /*
+ * Reset the context, if it needs it, so that we aren't hanging on to
+ * more than the initial malloc chunk.
+ */
+ if (!context->isReset)
+ MemoryContextResetOnly(context);
+
+ /*
+ * If the freelist is full, just discard what's already in it. See
+ * comments with context_freelists[].
+ */
+ if (freelist->num_free >= MAX_FREE_CONTEXTS)
+ {
+ while (freelist->first_free != NULL)
+ {
+ AllocSetContext *oldset = freelist->first_free;
+
+ freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
+ freelist->num_free--;
+
+ /* All that remains is to free the header/initial block */
+ free(oldset);
+ }
+ Assert(freelist->num_free == 0);
+ }
+
+ /* Now add the just-deleted context to the freelist. */
+ set->header.nextchild = (MemoryContext) freelist->first_free;
+ freelist->first_free = set;
+ freelist->num_free++;
+
+ return;
+ }
+
+ /* Free all blocks, except the keeper which is part of context header */
+ while (block != NULL)
+ {
+ AllocBlock next = block->next;
+
+ if (block != set->keeper)
+ context->mem_allocated -= block->endptr - ((char *) block);
+
+ #ifdef CLOBBER_FREED_MEMORY
+ wipe_mem(block, block->freeptr - ((char *) block));
+ #endif
+
+ if (block != set->keeper)
+ free(block);
+
+ block = next;
+ }
+
+ Assert(context->mem_allocated == keepersize);
+
+ /* Finally, free the context header, including the keeper block */
+ free(set);
+ }
+
+ /*
+ * AllocSetAlloc
+ * Returns pointer to allocated memory of given size or NULL if
+ * request could not be completed; memory is added to the set.
+ *
+ * No request may exceed:
+ * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
+ * All callers use a much-lower limit.
+ *
+ * Note: when using valgrind, it doesn't matter how the returned allocation
+ * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
+ * return space that is marked NOACCESS - AllocSetRealloc has to beware!
+ */
+ static void *
+ AllocSetAlloc(MemoryContext context, Size size)
+ {
+ AllocSet set = (AllocSet) context;
+ AllocBlock block;
+ AllocChunk chunk;
+ int fidx;
+ Size chunk_size;
+ Size blksize;
+
+ AssertArg(AllocSetIsValid(set));
+
+ /*
+ * If requested size exceeds maximum for chunks, allocate an entire block
+ * for this request.
+ */
+ if (size > set->allocChunkLimit)
+ {
+ chunk_size = MAXALIGN(size);
+ blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
+ block = (AllocBlock) malloc(blksize);
+ if (block == NULL)
+ return NULL;
+
+ context->mem_allocated += blksize;
+
+ block->aset = set;
+ block->freeptr = block->endptr = ((char *) block) + blksize;
+
+ chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
+ chunk->aset = set;
+ chunk->size = chunk_size;
+ #ifdef MEMORY_CONTEXT_CHECKING
+ chunk->requested_size = size;
+ /* set mark to catch clobber of "unused" space */
+ if (size < chunk_size)
+ set_sentinel(AllocChunkGetPointer(chunk), size);
+ #endif
+ #ifdef RANDOMIZE_ALLOCATED_MEMORY
+ /* fill the allocated space with junk */
+ randomize_mem((char *) AllocChunkGetPointer(chunk), size);
+ #endif
+
+ /*
+ * Stick the new block underneath the active allocation block, if any,
+ * so that we don't lose the use of the space remaining therein.
+ */
+ if (set->blocks != NULL)
+ {
+ block->prev = set->blocks;
+ block->next = set->blocks->next;
+ if (block->next)
+ block->next->prev = block;
+ set->blocks->next = block;
+ }
+ else
+ {
+ block->prev = NULL;
+ block->next = NULL;
+ set->blocks = block;
+ }
+
+ /* Ensure any padding bytes are marked NOACCESS. */
+ VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
+ chunk_size - size);
+
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ return AllocChunkGetPointer(chunk);
+ }
+
+ /*
+ * Request is small enough to be treated as a chunk. Look in the
+ * corresponding free list to see if there is a free chunk we could reuse.
+ * If one is found, remove it from the free list, make it again a member
+ * of the alloc set and return its data address.
+ */
+ fidx = AllocSetFreeIndex(size);
+ chunk = set->freelist[fidx];
+ if (chunk != NULL)
+ {
+ Assert(chunk->size >= size);
+
+ set->freelist[fidx] = (AllocChunk) chunk->aset;
+
+ chunk->aset = (void *) set;
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ chunk->requested_size = size;
+ /* set mark to catch clobber of "unused" space */
+ if (size < chunk->size)
+ set_sentinel(AllocChunkGetPointer(chunk), size);
+ #endif
+ #ifdef RANDOMIZE_ALLOCATED_MEMORY
+ /* fill the allocated space with junk */
+ randomize_mem((char *) AllocChunkGetPointer(chunk), size);
+ #endif
+
+ /* Ensure any padding bytes are marked NOACCESS. */
+ VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
+ chunk->size - size);
+
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ return AllocChunkGetPointer(chunk);
+ }
+
+ /*
+ * Choose the actual chunk size to allocate.
+ */
+ chunk_size = (1 << ALLOC_MINBITS) << fidx;
+ Assert(chunk_size >= size);
+
+ /*
+ * If there is enough room in the active allocation block, we will put the
+ * chunk into that block. Else must start a new one.
+ */
+ if ((block = set->blocks) != NULL)
+ {
+ Size availspace = block->endptr - block->freeptr;
+
+ if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
+ {
+ /*
+ * The existing active (top) block does not have enough room for
+ * the requested allocation, but it might still have a useful
+ * amount of space in it. Once we push it down in the block list,
+ * we'll never try to allocate more space from it. So, before we
+ * do that, carve up its free space into chunks that we can put on
+ * the set's freelists.
+ *
+ * Because we can only get here when there's less than
+ * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
+ * more than ALLOCSET_NUM_FREELISTS-1 times.
+ */
+ while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
+ {
+ Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
+ int a_fidx = AllocSetFreeIndex(availchunk);
881
+ /*
882
+ * In most cases, we'll get back the index of the next larger
883
+ * freelist than the one we need to put this chunk on. The
884
+ * exception is when availchunk is exactly a power of 2.
885
+ */
886
+ if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
887
+ {
888
+ a_fidx--;
889
+ Assert(a_fidx >= 0);
890
+ availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
891
+ }
892
+
893
+ chunk = (AllocChunk) (block->freeptr);
894
+
895
+ /* Prepare to initialize the chunk header. */
896
+ VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
897
+
898
+ block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
899
+ availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
900
+
901
+ chunk->size = availchunk;
902
+ #ifdef MEMORY_CONTEXT_CHECKING
903
+ chunk->requested_size = 0; /* mark it free */
904
+ #endif
905
+ chunk->aset = (void *) set->freelist[a_fidx];
906
+ set->freelist[a_fidx] = chunk;
907
+ }
908
+
909
+ /* Mark that we need to create a new block */
910
+ block = NULL;
911
+ }
912
+ }
913
+
914
+ /*
915
+ * Time to create a new regular (multi-chunk) block?
916
+ */
917
+ if (block == NULL)
918
+ {
919
+ Size required_size;
920
+
921
+ /*
922
+ * The first such block has size initBlockSize, and we double the
923
+ * space in each succeeding block, but not more than maxBlockSize.
924
+ */
925
+ blksize = set->nextBlockSize;
926
+ set->nextBlockSize <<= 1;
927
+ if (set->nextBlockSize > set->maxBlockSize)
928
+ set->nextBlockSize = set->maxBlockSize;
929
+
930
+ /*
931
+ * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
932
+ * space... but try to keep it a power of 2.
933
+ */
934
+ required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
935
+ while (blksize < required_size)
936
+ blksize <<= 1;
937
+
938
+ /* Try to allocate it */
939
+ block = (AllocBlock) malloc(blksize);
940
+
941
+ /*
942
+ * We could be asking for pretty big blocks here, so cope if malloc
943
+ * fails. But give up if there's less than 1 MB or so available...
944
+ */
945
+ while (block == NULL && blksize > 1024 * 1024)
946
+ {
947
+ blksize >>= 1;
948
+ if (blksize < required_size)
949
+ break;
950
+ block = (AllocBlock) malloc(blksize);
951
+ }
952
+
953
+ if (block == NULL)
954
+ return NULL;
955
+
956
+ context->mem_allocated += blksize;
957
+
958
+ block->aset = set;
959
+ block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
960
+ block->endptr = ((char *) block) + blksize;
961
+
962
+ /* Mark unallocated space NOACCESS. */
963
+ VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
964
+ blksize - ALLOC_BLOCKHDRSZ);
965
+
966
+ block->prev = NULL;
967
+ block->next = set->blocks;
968
+ if (block->next)
969
+ block->next->prev = block;
970
+ set->blocks = block;
971
+ }
972
+
973
+ /*
974
+ * OK, do the allocation
975
+ */
976
+ chunk = (AllocChunk) (block->freeptr);
977
+
978
+ /* Prepare to initialize the chunk header. */
979
+ VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
980
+
981
+ block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
982
+ Assert(block->freeptr <= block->endptr);
983
+
984
+ chunk->aset = (void *) set;
985
+ chunk->size = chunk_size;
986
+ #ifdef MEMORY_CONTEXT_CHECKING
987
+ chunk->requested_size = size;
988
+ /* set mark to catch clobber of "unused" space */
989
+ if (size < chunk->size)
990
+ set_sentinel(AllocChunkGetPointer(chunk), size);
991
+ #endif
992
+ #ifdef RANDOMIZE_ALLOCATED_MEMORY
993
+ /* fill the allocated space with junk */
994
+ randomize_mem((char *) AllocChunkGetPointer(chunk), size);
995
+ #endif
996
+
997
+ /* Ensure any padding bytes are marked NOACCESS. */
998
+ VALGRIND_MAKE_MEM_NOACCESS((char *) AllocChunkGetPointer(chunk) + size,
999
+ chunk_size - size);
1000
+
1001
+ /* Disallow external access to private part of chunk header. */
1002
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1003
+
1004
+ return AllocChunkGetPointer(chunk);
1005
+ }
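AllocSetAlloc rounds each small request up to the next power of two of at least 1 << ALLOC_MINBITS bytes (ALLOC_MINBITS is 3 in stock PostgreSQL, so the smallest chunk is 8 bytes), and it grows fresh blocks geometrically from initBlockSize up to maxBlockSize. The real AllocSetFreeIndex computes the freelist index with a bit trick; the plain loop below shows the same mapping. A small, self-contained sketch of both calculations, with illustrative names and the defaults of ALLOCSET_DEFAULT_SIZES assumed:

#include <stdio.h>
#include <stddef.h>

#define MINBITS 3                          /* stands in for ALLOC_MINBITS */

/* Freelist index: smallest k with (1 << MINBITS) << k >= size (size > 0). */
static int free_index(size_t size)
{
    int idx = 0;
    size_t chunk = (size_t) 1 << MINBITS;
    while (chunk < size)
    {
        chunk <<= 1;
        idx++;
    }
    return idx;
}

int main(void)
{
    /* e.g. a 100-byte request lands on index 4, a 128-byte chunk */
    size_t size = 100;
    int idx = free_index(size);
    printf("request %zu -> freelist %d, chunk %zu bytes\n",
           size, idx, ((size_t) 1 << MINBITS) << idx);

    /* Block sizes double per new block and are clamped at maxBlockSize,
       e.g. 8 kB -> 16 kB -> 32 kB -> ... under ALLOCSET_DEFAULT_SIZES. */
    size_t blksize = 8 * 1024, maxBlockSize = 8 * 1024 * 1024;
    for (int i = 0; i < 5; i++)
    {
        printf("block %d: %zu bytes\n", i, blksize);
        blksize <<= 1;
        if (blksize > maxBlockSize)
            blksize = maxBlockSize;
    }
    return 0;
}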
+
+ /*
+ * AllocSetFree
+ * Frees allocated memory; memory is removed from the set.
+ */
+ static void
+ AllocSetFree(MemoryContext context, void *pointer)
+ {
+ AllocSet set = (AllocSet) context;
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
+
+ /* Allow access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* Test for someone scribbling on unused space in chunk */
+ if (chunk->requested_size < chunk->size)
+ if (!sentinel_ok(pointer, chunk->requested_size))
+ elog(WARNING, "detected write past chunk end in %s %p",
+ set->header.name, chunk);
+ #endif
+
+ if (chunk->size > set->allocChunkLimit)
+ {
+ /*
+ * Big chunks are certain to have been allocated as single-chunk
+ * blocks. Just unlink that block and return it to malloc().
+ */
+ AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
+
+ /*
+ * Try to verify that we have a sane block pointer: it should
+ * reference the correct aset, and freeptr and endptr should point
+ * just past the chunk.
+ */
+ if (block->aset != set ||
+ block->freeptr != block->endptr ||
+ block->freeptr != ((char *) block) +
+ (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
+ elog(ERROR, "could not find block containing chunk %p", chunk);
+
+ /* OK, remove block from aset's list and free it */
+ if (block->prev)
+ block->prev->next = block->next;
+ else
+ set->blocks = block->next;
+ if (block->next)
+ block->next->prev = block->prev;
+
+ context->mem_allocated -= block->endptr - ((char *) block);
+
+ #ifdef CLOBBER_FREED_MEMORY
+ wipe_mem(block, block->freeptr - ((char *) block));
+ #endif
+ free(block);
+ }
+ else
+ {
+ /* Normal case, put the chunk into appropriate freelist */
+ int fidx = AllocSetFreeIndex(chunk->size);
+
+ chunk->aset = (void *) set->freelist[fidx];
+
+ #ifdef CLOBBER_FREED_MEMORY
+ wipe_mem(pointer, chunk->size);
+ #endif
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* Reset requested_size to 0 in chunks that are on freelist */
+ chunk->requested_size = 0;
+ #endif
+ set->freelist[fidx] = chunk;
+ }
+ }
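Inside the PostgreSQL backend (and therefore inside pg_query's bundled copy of it) these routines are not called directly; they are reached through the palloc/pfree family on the current memory context. A hedged usage sketch only, assuming ALLOCSET_DEFAULT_SIZES and the stock ALLOC_MINBITS of 3; the sizes in the comments are illustrative:

#include "postgres.h"
#include "utils/memutils.h"

/* Illustrative only: this runs inside backend code, not via pg_query's Ruby API. */
static void
example_usage(void)
{
    MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                              "example context",
                                              ALLOCSET_DEFAULT_SIZES);
    MemoryContext old = MemoryContextSwitchTo(cxt);

    char *small = palloc(100);        /* AllocSetAlloc rounds this to a 128-byte chunk */
    char *large = palloc(64 * 1024);  /* above allocChunkLimit: gets its own single-chunk block */

    pfree(small);                     /* AllocSetFree pushes the chunk onto a freelist */
    pfree(large);                     /* AllocSetFree unlinks the block and free()s it */

    MemoryContextSwitchTo(old);
    MemoryContextDelete(cxt);         /* ends up in AllocSetDelete above */
}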
+
+ /*
+ * AllocSetRealloc
+ * Returns new pointer to allocated memory of given size or NULL if
+ * request could not be completed; this memory is added to the set.
+ * Memory associated with given pointer is copied into the new memory,
+ * and the old memory is freed.
+ *
+ * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
+ * makes our Valgrind client requests less-precise, hazarding false negatives.
+ * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
+ * request size.)
+ */
+ static void *
+ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
+ {
+ AllocSet set = (AllocSet) context;
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
+ Size oldsize;
+
+ /* Allow access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ oldsize = chunk->size;
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ /* Test for someone scribbling on unused space in chunk */
+ if (chunk->requested_size < oldsize)
+ if (!sentinel_ok(pointer, chunk->requested_size))
+ elog(WARNING, "detected write past chunk end in %s %p",
+ set->header.name, chunk);
+ #endif
+
+ if (oldsize > set->allocChunkLimit)
+ {
+ /*
+ * The chunk must have been allocated as a single-chunk block. Use
+ * realloc() to make the containing block bigger, or smaller, with
+ * minimum space wastage.
+ */
+ AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
+ Size chksize;
+ Size blksize;
+ Size oldblksize;
+
+ /*
+ * Try to verify that we have a sane block pointer: it should
+ * reference the correct aset, and freeptr and endptr should point
+ * just past the chunk.
+ */
+ if (block->aset != set ||
+ block->freeptr != block->endptr ||
+ block->freeptr != ((char *) block) +
+ (oldsize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
+ elog(ERROR, "could not find block containing chunk %p", chunk);
+
+ /*
+ * Even if the new request is less than set->allocChunkLimit, we stick
+ * with the single-chunk block approach. Therefore we need
+ * chunk->size to be bigger than set->allocChunkLimit, so we don't get
+ * confused about the chunk's status in future calls.
+ */
+ chksize = Max(size, set->allocChunkLimit + 1);
+ chksize = MAXALIGN(chksize);
+
+ /* Do the realloc */
+ blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
+ oldblksize = block->endptr - ((char *) block);
+
+ block = (AllocBlock) realloc(block, blksize);
+ if (block == NULL)
+ {
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+ return NULL;
+ }
+
+ /* updated separately, not to underflow when (oldblksize > blksize) */
+ context->mem_allocated -= oldblksize;
+ context->mem_allocated += blksize;
+
+ block->freeptr = block->endptr = ((char *) block) + blksize;
+
+ /* Update pointers since block has likely been moved */
+ chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
+ pointer = AllocChunkGetPointer(chunk);
+ if (block->prev)
+ block->prev->next = block;
+ else
+ set->blocks = block;
+ if (block->next)
+ block->next->prev = block;
+ chunk->size = chksize;
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+ #ifdef RANDOMIZE_ALLOCATED_MEMORY
+ /* We can only fill the extra space if we know the prior request */
+ if (size > chunk->requested_size)
+ randomize_mem((char *) pointer + chunk->requested_size,
+ size - chunk->requested_size);
+ #endif
+
+ /*
+ * realloc() (or randomize_mem()) will have left any newly-allocated
+ * part UNDEFINED, but we may need to adjust trailing bytes from the
+ * old allocation.
+ */
+ #ifdef USE_VALGRIND
+ if (oldsize > chunk->requested_size)
+ VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
+ oldsize - chunk->requested_size);
+ #endif
+
+ chunk->requested_size = size;
+
+ /* set mark to catch clobber of "unused" space */
+ if (size < chunk->size)
+ set_sentinel(pointer, size);
+ #else /* !MEMORY_CONTEXT_CHECKING */
+
+ /*
+ * We don't know how much of the old chunk size was the actual
+ * allocation; it could have been as small as one byte. We have to be
+ * conservative and just mark the entire old portion DEFINED.
+ */
+ VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
+ #endif
+
+ /* Ensure any padding bytes are marked NOACCESS. */
+ VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
+
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ return pointer;
+ }
+
+ /*
+ * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
+ * allocated area already is >= the new size. (In particular, we will
+ * fall out here if the requested size is a decrease.)
+ */
+ else if (oldsize >= size)
+ {
+ #ifdef MEMORY_CONTEXT_CHECKING
+ Size oldrequest = chunk->requested_size;
+
+ #ifdef RANDOMIZE_ALLOCATED_MEMORY
+ /* We can only fill the extra space if we know the prior request */
+ if (size > oldrequest)
+ randomize_mem((char *) pointer + oldrequest,
+ size - oldrequest);
+ #endif
+
+ chunk->requested_size = size;
+
+ /*
+ * If this is an increase, mark any newly-available part UNDEFINED.
+ * Otherwise, mark the obsolete part NOACCESS.
+ */
+ if (size > oldrequest)
+ VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
+ size - oldrequest);
+ else
+ VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
+ oldsize - size);
+
+ /* set mark to catch clobber of "unused" space */
+ if (size < oldsize)
+ set_sentinel(pointer, size);
+ #else /* !MEMORY_CONTEXT_CHECKING */
+
+ /*
+ * We don't have the information to determine whether we're growing
+ * the old request or shrinking it, so we conservatively mark the
+ * entire new allocation DEFINED.
+ */
+ VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
+ VALGRIND_MAKE_MEM_DEFINED(pointer, size);
+ #endif
+
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ return pointer;
+ }
+ else
+ {
+ /*
+ * Enlarge-a-small-chunk case. We just do this by brute force, ie,
+ * allocate a new chunk and copy the data. Since we know the existing
+ * data isn't huge, this won't involve any great memcpy expense, so
+ * it's not worth being smarter. (At one time we tried to avoid
+ * memcpy when it was possible to enlarge the chunk in-place, but that
+ * turns out to misbehave unpleasantly for repeated cycles of
+ * palloc/repalloc/pfree: the eventually freed chunks go into the
+ * wrong freelist for the next initial palloc request, and so we leak
+ * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
+ */
+ AllocPointer newPointer;
+
+ /* allocate new chunk */
+ newPointer = AllocSetAlloc((MemoryContext) set, size);
+
+ /* leave immediately if request was not completed */
+ if (newPointer == NULL)
+ {
+ /* Disallow external access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+ return NULL;
+ }
+
+ /*
+ * AllocSetAlloc() may have returned a region that is still NOACCESS.
+ * Change it to UNDEFINED for the moment; memcpy() will then transfer
+ * definedness from the old allocation to the new. If we know the old
+ * allocation, copy just that much. Otherwise, make the entire old
+ * chunk defined to avoid errors as we copy the currently-NOACCESS
+ * trailing bytes.
+ */
+ VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
+ #ifdef MEMORY_CONTEXT_CHECKING
+ oldsize = chunk->requested_size;
+ #else
+ VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
+ #endif
+
+ /* transfer existing data (certain to fit) */
+ memcpy(newPointer, pointer, oldsize);
+
+ /* free old chunk */
+ AllocSetFree((MemoryContext) set, pointer);
+
+ return newPointer;
+ }
+ }
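The function above picks one of three strategies: realloc() the dedicated block when the old chunk was oversized, hand back the same pointer when the rounded-up chunk already covers the new size, and otherwise allocate a new chunk, copy, and free the old one. A compressed sketch of that decision only; the names here (Chunk, example_realloc, ctx_alloc, ctx_free) are made up for illustration and the header bookkeeping is deliberately omitted:

#include <string.h>
#include <stdlib.h>

/* Hypothetical stand-in for the real chunk bookkeeping. */
typedef struct { size_t size; /* power-of-two capacity, or an oversized chunk */ } Chunk;

static void *
example_realloc(Chunk *chunk, void *data, size_t newsize, size_t chunk_limit,
                void *(*ctx_alloc)(size_t), void (*ctx_free)(void *))
{
    if (chunk->size > chunk_limit)
        /* 1. oversized chunk lives in its own block: resize that block */
        return realloc(data, newsize);      /* simplified: block/chunk header fixups omitted */

    if (chunk->size >= newsize)
        /* 2. the rounded-up chunk is already big enough: keep the pointer */
        return data;

    /* 3. growing a small chunk: brute-force alloc + copy + free */
    void *fresh = ctx_alloc(newsize);
    if (fresh == NULL)
        return NULL;
    memcpy(fresh, data, chunk->size);       /* the real code copies the old (requested) size */
    ctx_free(data);
    return fresh;
}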
+
+ /*
+ * AllocSetGetChunkSpace
+ * Given a currently-allocated chunk, determine the total space
+ * it occupies (including all memory-allocation overhead).
+ */
+ static Size
+ AllocSetGetChunkSpace(MemoryContext context, void *pointer)
+ {
+ AllocChunk chunk = AllocPointerGetChunk(pointer);
+ Size result;
+
+ VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
+ result = chunk->size + ALLOC_CHUNKHDRSZ;
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+ return result;
+ }
+
+ /*
+ * AllocSetIsEmpty
+ * Is an allocset empty of any allocated space?
+ */
+ static bool
+ AllocSetIsEmpty(MemoryContext context)
+ {
+ /*
+ * For now, we say "empty" only if the context is new or just reset. We
+ * could examine the freelists to determine if all space has been freed,
+ * but it's not really worth the trouble for present uses of this
+ * functionality.
+ */
+ if (context->isReset)
+ return true;
+ return false;
+ }
+
+ /*
+ * AllocSetStats
+ * Compute stats about memory consumption of an allocset.
+ *
+ * printfunc: if not NULL, pass a human-readable stats string to this.
+ * passthru: pass this pointer through to printfunc.
+ * totals: if not NULL, add stats about this context into *totals.
+ */
+ static void
+ AllocSetStats(MemoryContext context,
+ MemoryStatsPrintFunc printfunc, void *passthru,
+ MemoryContextCounters *totals)
+ {
+ AllocSet set = (AllocSet) context;
+ Size nblocks = 0;
+ Size freechunks = 0;
+ Size totalspace;
+ Size freespace = 0;
+ AllocBlock block;
+ int fidx;
+
+ /* Include context header in totalspace */
+ totalspace = MAXALIGN(sizeof(AllocSetContext));
+
+ for (block = set->blocks; block != NULL; block = block->next)
+ {
+ nblocks++;
+ totalspace += block->endptr - ((char *) block);
+ freespace += block->endptr - block->freeptr;
+ }
+ for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
+ {
+ AllocChunk chunk;
+
+ for (chunk = set->freelist[fidx]; chunk != NULL;
+ chunk = (AllocChunk) chunk->aset)
+ {
+ freechunks++;
+ freespace += chunk->size + ALLOC_CHUNKHDRSZ;
+ }
+ }
+
+ if (printfunc)
+ {
+ char stats_string[200];
+
+ snprintf(stats_string, sizeof(stats_string),
+ "%zu total in %zd blocks; %zu free (%zd chunks); %zu used",
+ totalspace, nblocks, freespace, freechunks,
+ totalspace - freespace);
+ printfunc(context, passthru, stats_string);
+ }
+
+ if (totals)
+ {
+ totals->nblocks += nblocks;
+ totals->freechunks += freechunks;
+ totals->totalspace += totalspace;
+ totals->freespace += freespace;
+ }
+ }
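In the backend, these per-context counters are usually reached through MemoryContextStats(), which walks the context tree and reports each context using the snprintf format shown above. A brief hedged sketch; the sample output values in the comment are illustrative only:

#include "postgres.h"
#include "utils/memutils.h"

/* Dumps the whole context tree to stderr. Each AllocSet line follows the
 * format built above, e.g. (made-up numbers):
 *   example context: 8192 total in 1 blocks; 7744 free (0 chunks); 448 used
 */
MemoryContextStats(TopMemoryContext);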
+
+
+ #ifdef MEMORY_CONTEXT_CHECKING
+
+ /*
+ * AllocSetCheck
+ * Walk through chunks and check consistency of memory.
+ *
+ * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
+ * find yourself in an infinite loop when trouble occurs, because this
+ * routine will be entered again when elog cleanup tries to release memory!
+ */
+ static void
+ AllocSetCheck(MemoryContext context)
+ {
+ AllocSet set = (AllocSet) context;
+ const char *name = set->header.name;
+ AllocBlock prevblock;
+ AllocBlock block;
+ Size total_allocated = 0;
+
+ for (prevblock = NULL, block = set->blocks;
+ block != NULL;
+ prevblock = block, block = block->next)
+ {
+ char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
+ long blk_used = block->freeptr - bpoz;
+ long blk_data = 0;
+ long nchunks = 0;
+
+ if (set->keeper == block)
+ total_allocated += block->endptr - ((char *) set);
+ else
+ total_allocated += block->endptr - ((char *) block);
+
+ /*
+ * Empty block - empty can be keeper-block only
+ */
+ if (!blk_used)
+ {
+ if (set->keeper != block)
+ elog(WARNING, "problem in alloc set %s: empty block %p",
+ name, block);
+ }
+
+ /*
+ * Check block header fields
+ */
+ if (block->aset != set ||
+ block->prev != prevblock ||
+ block->freeptr < bpoz ||
+ block->freeptr > block->endptr)
+ elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
+ name, block);
+
+ /*
+ * Chunk walker
+ */
+ while (bpoz < block->freeptr)
+ {
+ AllocChunk chunk = (AllocChunk) bpoz;
+ Size chsize,
+ dsize;
+
+ /* Allow access to private part of chunk header. */
+ VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ chsize = chunk->size; /* aligned chunk size */
+ dsize = chunk->requested_size; /* real data */
+
+ /*
+ * Check chunk size
+ */
+ if (dsize > chsize)
+ elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
+ name, chunk, block);
+ if (chsize < (1 << ALLOC_MINBITS))
+ elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
+ name, chsize, chunk, block);
+
+ /* single-chunk block? */
+ if (chsize > set->allocChunkLimit &&
+ chsize + ALLOC_CHUNKHDRSZ != blk_used)
+ elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
+ name, chunk, block);
+
+ /*
+ * If chunk is allocated, check for correct aset pointer. (If it's
+ * free, the aset is the freelist pointer, which we can't check as
+ * easily...) Note this is an incomplete test, since palloc(0)
+ * produces an allocated chunk with requested_size == 0.
+ */
+ if (dsize > 0 && chunk->aset != (void *) set)
+ elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
+ name, block, chunk);
+
+ /*
+ * Check for overwrite of padding space in an allocated chunk.
+ */
+ if (chunk->aset == (void *) set && dsize < chsize &&
+ !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
+ elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
+ name, block, chunk);
+
+ /*
+ * If chunk is allocated, disallow external access to private part
+ * of chunk header.
+ */
+ if (chunk->aset == (void *) set)
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
+
+ blk_data += chsize;
+ nchunks++;
+
+ bpoz += ALLOC_CHUNKHDRSZ + chsize;
+ }
+
+ if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
+ elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
+ name, block);
+ }
+
+ Assert(total_allocated == context->mem_allocated);
+ }
+
+ #endif /* MEMORY_CONTEXT_CHECKING */
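Both AllocSetCheck and the pfree/repalloc paths above lean on the set_sentinel/sentinel_ok pair from memdebug.h: whenever a chunk is larger than the caller's request, one marker byte is written just past the requested size and verified later, so a write past the end of the request is caught. A minimal sketch of that idea; the function names here are illustrative, and while 0x7E matches PostgreSQL's memdebug.h, treat the details as an assumption:

#include <stdbool.h>
#include <stddef.h>

/* Write a marker byte just past the caller-visible part of an allocation. */
static void sentinel_set(void *base, size_t requested)
{
    ((unsigned char *) base)[requested] = 0x7E;
}

/* Later, a clobbered marker means something wrote past the requested size. */
static bool sentinel_check(const void *base, size_t requested)
{
    return ((const unsigned char *) base)[requested] == 0x7E;
}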
1539
+
1540
+ void
1541
+ AllocSetDeleteFreeList(MemoryContext context)
1542
+ {
1543
+ AllocSet set = (AllocSet) context;
1544
+ if (set->freeListIndex >= 0)
1545
+ {
1546
+ AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
1547
+
1548
+ while (freelist->first_free != NULL)
1549
+ {
1550
+ AllocSetContext *oldset = freelist->first_free;
1551
+
1552
+ freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
1553
+ freelist->num_free--;
1554
+
1555
+ /* All that remains is to free the header/initial block */
1556
+ free(oldset);
1557
+ }
1558
+ Assert(freelist->num_free == 0);
1559
+ }
1560
+ }