pg_query 1.1.0 → 2.0.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +163 -52
- data/README.md +80 -69
- data/Rakefile +82 -1
- data/ext/pg_query/extconf.rb +3 -31
- data/ext/pg_query/guc-file.c +0 -0
- data/ext/pg_query/include/access/amapi.h +246 -0
- data/ext/pg_query/include/access/attmap.h +52 -0
- data/ext/pg_query/include/access/attnum.h +64 -0
- data/ext/pg_query/include/access/clog.h +61 -0
- data/ext/pg_query/include/access/commit_ts.h +77 -0
- data/ext/pg_query/include/access/detoast.h +92 -0
- data/ext/pg_query/include/access/genam.h +228 -0
- data/ext/pg_query/include/access/gin.h +78 -0
- data/ext/pg_query/include/access/htup.h +89 -0
- data/ext/pg_query/include/access/htup_details.h +819 -0
- data/ext/pg_query/include/access/itup.h +161 -0
- data/ext/pg_query/include/access/parallel.h +82 -0
- data/ext/pg_query/include/access/printtup.h +35 -0
- data/ext/pg_query/include/access/relation.h +28 -0
- data/ext/pg_query/include/access/relscan.h +176 -0
- data/ext/pg_query/include/access/rmgr.h +35 -0
- data/ext/pg_query/include/access/rmgrlist.h +49 -0
- data/ext/pg_query/include/access/sdir.h +58 -0
- data/ext/pg_query/include/access/skey.h +151 -0
- data/ext/pg_query/include/access/stratnum.h +83 -0
- data/ext/pg_query/include/access/sysattr.h +29 -0
- data/ext/pg_query/include/access/table.h +27 -0
- data/ext/pg_query/include/access/tableam.h +1825 -0
- data/ext/pg_query/include/access/transam.h +265 -0
- data/ext/pg_query/include/access/tupconvert.h +51 -0
- data/ext/pg_query/include/access/tupdesc.h +154 -0
- data/ext/pg_query/include/access/tupmacs.h +247 -0
- data/ext/pg_query/include/access/twophase.h +61 -0
- data/ext/pg_query/include/access/xact.h +463 -0
- data/ext/pg_query/include/access/xlog.h +398 -0
- data/ext/pg_query/include/access/xlog_internal.h +330 -0
- data/ext/pg_query/include/access/xlogdefs.h +109 -0
- data/ext/pg_query/include/access/xloginsert.h +64 -0
- data/ext/pg_query/include/access/xlogreader.h +327 -0
- data/ext/pg_query/include/access/xlogrecord.h +227 -0
- data/ext/pg_query/include/bootstrap/bootstrap.h +62 -0
- data/ext/pg_query/include/c.h +1322 -0
- data/ext/pg_query/include/catalog/catalog.h +42 -0
- data/ext/pg_query/include/catalog/catversion.h +58 -0
- data/ext/pg_query/include/catalog/dependency.h +275 -0
- data/ext/pg_query/include/catalog/genbki.h +64 -0
- data/ext/pg_query/include/catalog/index.h +199 -0
- data/ext/pg_query/include/catalog/indexing.h +366 -0
- data/ext/pg_query/include/catalog/namespace.h +188 -0
- data/ext/pg_query/include/catalog/objectaccess.h +197 -0
- data/ext/pg_query/include/catalog/objectaddress.h +84 -0
- data/ext/pg_query/include/catalog/pg_aggregate.h +176 -0
- data/ext/pg_query/include/catalog/pg_aggregate_d.h +77 -0
- data/ext/pg_query/include/catalog/pg_am.h +60 -0
- data/ext/pg_query/include/catalog/pg_am_d.h +45 -0
- data/ext/pg_query/include/catalog/pg_attribute.h +204 -0
- data/ext/pg_query/include/catalog/pg_attribute_d.h +59 -0
- data/ext/pg_query/include/catalog/pg_authid.h +58 -0
- data/ext/pg_query/include/catalog/pg_authid_d.h +49 -0
- data/ext/pg_query/include/catalog/pg_class.h +200 -0
- data/ext/pg_query/include/catalog/pg_class_d.h +103 -0
- data/ext/pg_query/include/catalog/pg_collation.h +73 -0
- data/ext/pg_query/include/catalog/pg_collation_d.h +45 -0
- data/ext/pg_query/include/catalog/pg_constraint.h +247 -0
- data/ext/pg_query/include/catalog/pg_constraint_d.h +67 -0
- data/ext/pg_query/include/catalog/pg_control.h +250 -0
- data/ext/pg_query/include/catalog/pg_conversion.h +72 -0
- data/ext/pg_query/include/catalog/pg_conversion_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_depend.h +73 -0
- data/ext/pg_query/include/catalog/pg_depend_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_event_trigger.h +51 -0
- data/ext/pg_query/include/catalog/pg_event_trigger_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_index.h +80 -0
- data/ext/pg_query/include/catalog/pg_index_d.h +56 -0
- data/ext/pg_query/include/catalog/pg_language.h +67 -0
- data/ext/pg_query/include/catalog/pg_language_d.h +39 -0
- data/ext/pg_query/include/catalog/pg_namespace.h +59 -0
- data/ext/pg_query/include/catalog/pg_namespace_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_opclass.h +85 -0
- data/ext/pg_query/include/catalog/pg_opclass_d.h +49 -0
- data/ext/pg_query/include/catalog/pg_operator.h +102 -0
- data/ext/pg_query/include/catalog/pg_operator_d.h +106 -0
- data/ext/pg_query/include/catalog/pg_opfamily.h +60 -0
- data/ext/pg_query/include/catalog/pg_opfamily_d.h +47 -0
- data/ext/pg_query/include/catalog/pg_partitioned_table.h +63 -0
- data/ext/pg_query/include/catalog/pg_partitioned_table_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_proc.h +211 -0
- data/ext/pg_query/include/catalog/pg_proc_d.h +99 -0
- data/ext/pg_query/include/catalog/pg_publication.h +115 -0
- data/ext/pg_query/include/catalog/pg_publication_d.h +36 -0
- data/ext/pg_query/include/catalog/pg_replication_origin.h +57 -0
- data/ext/pg_query/include/catalog/pg_replication_origin_d.h +29 -0
- data/ext/pg_query/include/catalog/pg_statistic.h +275 -0
- data/ext/pg_query/include/catalog/pg_statistic_d.h +194 -0
- data/ext/pg_query/include/catalog/pg_statistic_ext.h +74 -0
- data/ext/pg_query/include/catalog/pg_statistic_ext_d.h +40 -0
- data/ext/pg_query/include/catalog/pg_transform.h +45 -0
- data/ext/pg_query/include/catalog/pg_transform_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_trigger.h +137 -0
- data/ext/pg_query/include/catalog/pg_trigger_d.h +106 -0
- data/ext/pg_query/include/catalog/pg_ts_config.h +50 -0
- data/ext/pg_query/include/catalog/pg_ts_config_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_ts_dict.h +54 -0
- data/ext/pg_query/include/catalog/pg_ts_dict_d.h +33 -0
- data/ext/pg_query/include/catalog/pg_ts_parser.h +57 -0
- data/ext/pg_query/include/catalog/pg_ts_parser_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_ts_template.h +48 -0
- data/ext/pg_query/include/catalog/pg_ts_template_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_type.h +372 -0
- data/ext/pg_query/include/catalog/pg_type_d.h +285 -0
- data/ext/pg_query/include/catalog/storage.h +48 -0
- data/ext/pg_query/include/commands/async.h +54 -0
- data/ext/pg_query/include/commands/dbcommands.h +35 -0
- data/ext/pg_query/include/commands/defrem.h +173 -0
- data/ext/pg_query/include/commands/event_trigger.h +88 -0
- data/ext/pg_query/include/commands/explain.h +127 -0
- data/ext/pg_query/include/commands/prepare.h +61 -0
- data/ext/pg_query/include/commands/tablespace.h +67 -0
- data/ext/pg_query/include/commands/trigger.h +277 -0
- data/ext/pg_query/include/commands/user.h +37 -0
- data/ext/pg_query/include/commands/vacuum.h +293 -0
- data/ext/pg_query/include/commands/variable.h +38 -0
- data/ext/pg_query/include/common/file_perm.h +56 -0
- data/ext/pg_query/include/common/hashfn.h +104 -0
- data/ext/pg_query/include/common/ip.h +37 -0
- data/ext/pg_query/include/common/keywords.h +33 -0
- data/ext/pg_query/include/common/kwlookup.h +44 -0
- data/ext/pg_query/include/common/relpath.h +90 -0
- data/ext/pg_query/include/common/string.h +19 -0
- data/ext/pg_query/include/common/unicode_combining_table.h +196 -0
- data/ext/pg_query/include/datatype/timestamp.h +197 -0
- data/ext/pg_query/include/executor/execdesc.h +70 -0
- data/ext/pg_query/include/executor/executor.h +614 -0
- data/ext/pg_query/include/executor/functions.h +41 -0
- data/ext/pg_query/include/executor/instrument.h +101 -0
- data/ext/pg_query/include/executor/spi.h +175 -0
- data/ext/pg_query/include/executor/tablefunc.h +67 -0
- data/ext/pg_query/include/executor/tuptable.h +487 -0
- data/ext/pg_query/include/fmgr.h +775 -0
- data/ext/pg_query/include/funcapi.h +348 -0
- data/ext/pg_query/include/getaddrinfo.h +162 -0
- data/ext/pg_query/include/jit/jit.h +105 -0
- data/ext/pg_query/include/kwlist_d.h +1072 -0
- data/ext/pg_query/include/lib/ilist.h +727 -0
- data/ext/pg_query/include/lib/pairingheap.h +102 -0
- data/ext/pg_query/include/lib/simplehash.h +1059 -0
- data/ext/pg_query/include/lib/stringinfo.h +161 -0
- data/ext/pg_query/include/libpq/auth.h +29 -0
- data/ext/pg_query/include/libpq/crypt.h +46 -0
- data/ext/pg_query/include/libpq/hba.h +140 -0
- data/ext/pg_query/include/libpq/libpq-be.h +326 -0
- data/ext/pg_query/include/libpq/libpq.h +133 -0
- data/ext/pg_query/include/libpq/pqcomm.h +208 -0
- data/ext/pg_query/include/libpq/pqformat.h +210 -0
- data/ext/pg_query/include/libpq/pqsignal.h +42 -0
- data/ext/pg_query/include/mb/pg_wchar.h +672 -0
- data/ext/pg_query/include/mb/stringinfo_mb.h +24 -0
- data/ext/pg_query/include/miscadmin.h +476 -0
- data/ext/pg_query/include/nodes/bitmapset.h +122 -0
- data/ext/pg_query/include/nodes/execnodes.h +2520 -0
- data/ext/pg_query/include/nodes/extensible.h +160 -0
- data/ext/pg_query/include/nodes/lockoptions.h +61 -0
- data/ext/pg_query/include/nodes/makefuncs.h +108 -0
- data/ext/pg_query/include/nodes/memnodes.h +108 -0
- data/ext/pg_query/include/nodes/nodeFuncs.h +162 -0
- data/ext/pg_query/include/nodes/nodes.h +842 -0
- data/ext/pg_query/include/nodes/params.h +170 -0
- data/ext/pg_query/include/nodes/parsenodes.h +3579 -0
- data/ext/pg_query/include/nodes/pathnodes.h +2556 -0
- data/ext/pg_query/include/nodes/pg_list.h +605 -0
- data/ext/pg_query/include/nodes/plannodes.h +1251 -0
- data/ext/pg_query/include/nodes/primnodes.h +1541 -0
- data/ext/pg_query/include/nodes/print.h +34 -0
- data/ext/pg_query/include/nodes/tidbitmap.h +75 -0
- data/ext/pg_query/include/nodes/value.h +61 -0
- data/ext/pg_query/include/optimizer/cost.h +206 -0
- data/ext/pg_query/include/optimizer/geqo.h +88 -0
- data/ext/pg_query/include/optimizer/geqo_gene.h +45 -0
- data/ext/pg_query/include/optimizer/optimizer.h +199 -0
- data/ext/pg_query/include/optimizer/paths.h +249 -0
- data/ext/pg_query/include/optimizer/planmain.h +119 -0
- data/ext/pg_query/include/parser/analyze.h +49 -0
- data/ext/pg_query/include/parser/gram.h +1067 -0
- data/ext/pg_query/include/parser/gramparse.h +75 -0
- data/ext/pg_query/include/parser/kwlist.h +477 -0
- data/ext/pg_query/include/parser/parse_agg.h +68 -0
- data/ext/pg_query/include/parser/parse_clause.h +54 -0
- data/ext/pg_query/include/parser/parse_coerce.h +97 -0
- data/ext/pg_query/include/parser/parse_collate.h +27 -0
- data/ext/pg_query/include/parser/parse_expr.h +26 -0
- data/ext/pg_query/include/parser/parse_func.h +73 -0
- data/ext/pg_query/include/parser/parse_node.h +327 -0
- data/ext/pg_query/include/parser/parse_oper.h +67 -0
- data/ext/pg_query/include/parser/parse_relation.h +123 -0
- data/ext/pg_query/include/parser/parse_target.h +46 -0
- data/ext/pg_query/include/parser/parse_type.h +60 -0
- data/ext/pg_query/include/parser/parser.h +41 -0
- data/ext/pg_query/include/parser/parsetree.h +61 -0
- data/ext/pg_query/include/parser/scanner.h +152 -0
- data/ext/pg_query/include/parser/scansup.h +30 -0
- data/ext/pg_query/include/partitioning/partdefs.h +26 -0
- data/ext/pg_query/include/pg_config.h +988 -0
- data/ext/pg_query/include/pg_config_ext.h +8 -0
- data/ext/pg_query/include/pg_config_manual.h +350 -0
- data/ext/pg_query/include/pg_config_os.h +8 -0
- data/ext/pg_query/include/pg_getopt.h +56 -0
- data/ext/pg_query/include/pg_query.h +121 -0
- data/ext/pg_query/include/pg_query_enum_defs.c +2454 -0
- data/ext/pg_query/include/pg_query_fingerprint_conds.c +875 -0
- data/ext/pg_query/include/pg_query_fingerprint_defs.c +12413 -0
- data/ext/pg_query/include/pg_query_json_helper.c +61 -0
- data/ext/pg_query/include/pg_query_outfuncs_conds.c +686 -0
- data/ext/pg_query/include/pg_query_outfuncs_defs.c +2437 -0
- data/ext/pg_query/include/pg_query_readfuncs_conds.c +222 -0
- data/ext/pg_query/include/pg_query_readfuncs_defs.c +2878 -0
- data/ext/pg_query/include/pg_trace.h +17 -0
- data/ext/pg_query/include/pgstat.h +1487 -0
- data/ext/pg_query/include/pgtime.h +84 -0
- data/ext/pg_query/include/pl_gram.h +385 -0
- data/ext/pg_query/include/pl_reserved_kwlist.h +52 -0
- data/ext/pg_query/include/pl_reserved_kwlist_d.h +114 -0
- data/ext/pg_query/include/pl_unreserved_kwlist.h +112 -0
- data/ext/pg_query/include/pl_unreserved_kwlist_d.h +246 -0
- data/ext/pg_query/include/plerrcodes.h +990 -0
- data/ext/pg_query/include/plpgsql.h +1347 -0
- data/ext/pg_query/include/port.h +524 -0
- data/ext/pg_query/include/port/atomics.h +524 -0
- data/ext/pg_query/include/port/atomics/arch-arm.h +26 -0
- data/ext/pg_query/include/port/atomics/arch-ppc.h +254 -0
- data/ext/pg_query/include/port/atomics/arch-x86.h +252 -0
- data/ext/pg_query/include/port/atomics/fallback.h +170 -0
- data/ext/pg_query/include/port/atomics/generic-gcc.h +286 -0
- data/ext/pg_query/include/port/atomics/generic.h +401 -0
- data/ext/pg_query/include/port/pg_bitutils.h +226 -0
- data/ext/pg_query/include/port/pg_bswap.h +161 -0
- data/ext/pg_query/include/port/pg_crc32c.h +101 -0
- data/ext/pg_query/include/portability/instr_time.h +256 -0
- data/ext/pg_query/include/postgres.h +764 -0
- data/ext/pg_query/include/postgres_ext.h +74 -0
- data/ext/pg_query/include/postmaster/autovacuum.h +83 -0
- data/ext/pg_query/include/postmaster/bgworker.h +161 -0
- data/ext/pg_query/include/postmaster/bgworker_internals.h +64 -0
- data/ext/pg_query/include/postmaster/bgwriter.h +45 -0
- data/ext/pg_query/include/postmaster/fork_process.h +17 -0
- data/ext/pg_query/include/postmaster/interrupt.h +32 -0
- data/ext/pg_query/include/postmaster/pgarch.h +39 -0
- data/ext/pg_query/include/postmaster/postmaster.h +77 -0
- data/ext/pg_query/include/postmaster/syslogger.h +98 -0
- data/ext/pg_query/include/postmaster/walwriter.h +21 -0
- data/ext/pg_query/include/protobuf-c.h +1106 -0
- data/ext/pg_query/include/protobuf-c/protobuf-c.h +1106 -0
- data/ext/pg_query/include/protobuf/pg_query.pb-c.h +10846 -0
- data/ext/pg_query/include/protobuf/pg_query.pb.h +124718 -0
- data/ext/pg_query/include/regex/regex.h +184 -0
- data/ext/pg_query/include/replication/logicallauncher.h +31 -0
- data/ext/pg_query/include/replication/logicalproto.h +110 -0
- data/ext/pg_query/include/replication/logicalworker.h +19 -0
- data/ext/pg_query/include/replication/origin.h +73 -0
- data/ext/pg_query/include/replication/reorderbuffer.h +467 -0
- data/ext/pg_query/include/replication/slot.h +219 -0
- data/ext/pg_query/include/replication/syncrep.h +115 -0
- data/ext/pg_query/include/replication/walreceiver.h +340 -0
- data/ext/pg_query/include/replication/walsender.h +74 -0
- data/ext/pg_query/include/rewrite/prs2lock.h +46 -0
- data/ext/pg_query/include/rewrite/rewriteHandler.h +40 -0
- data/ext/pg_query/include/rewrite/rewriteManip.h +87 -0
- data/ext/pg_query/include/rewrite/rewriteSupport.h +26 -0
- data/ext/pg_query/include/storage/backendid.h +37 -0
- data/ext/pg_query/include/storage/block.h +121 -0
- data/ext/pg_query/include/storage/buf.h +46 -0
- data/ext/pg_query/include/storage/bufmgr.h +292 -0
- data/ext/pg_query/include/storage/bufpage.h +459 -0
- data/ext/pg_query/include/storage/condition_variable.h +62 -0
- data/ext/pg_query/include/storage/dsm.h +61 -0
- data/ext/pg_query/include/storage/dsm_impl.h +75 -0
- data/ext/pg_query/include/storage/fd.h +168 -0
- data/ext/pg_query/include/storage/ipc.h +81 -0
- data/ext/pg_query/include/storage/item.h +19 -0
- data/ext/pg_query/include/storage/itemid.h +184 -0
- data/ext/pg_query/include/storage/itemptr.h +206 -0
- data/ext/pg_query/include/storage/large_object.h +100 -0
- data/ext/pg_query/include/storage/latch.h +190 -0
- data/ext/pg_query/include/storage/lmgr.h +114 -0
- data/ext/pg_query/include/storage/lock.h +612 -0
- data/ext/pg_query/include/storage/lockdefs.h +59 -0
- data/ext/pg_query/include/storage/lwlock.h +232 -0
- data/ext/pg_query/include/storage/lwlocknames.h +51 -0
- data/ext/pg_query/include/storage/off.h +57 -0
- data/ext/pg_query/include/storage/pg_sema.h +61 -0
- data/ext/pg_query/include/storage/pg_shmem.h +90 -0
- data/ext/pg_query/include/storage/pmsignal.h +94 -0
- data/ext/pg_query/include/storage/predicate.h +87 -0
- data/ext/pg_query/include/storage/proc.h +333 -0
- data/ext/pg_query/include/storage/proclist_types.h +51 -0
- data/ext/pg_query/include/storage/procsignal.h +75 -0
- data/ext/pg_query/include/storage/relfilenode.h +99 -0
- data/ext/pg_query/include/storage/s_lock.h +1047 -0
- data/ext/pg_query/include/storage/sharedfileset.h +45 -0
- data/ext/pg_query/include/storage/shm_mq.h +85 -0
- data/ext/pg_query/include/storage/shm_toc.h +58 -0
- data/ext/pg_query/include/storage/shmem.h +81 -0
- data/ext/pg_query/include/storage/sinval.h +153 -0
- data/ext/pg_query/include/storage/sinvaladt.h +43 -0
- data/ext/pg_query/include/storage/smgr.h +109 -0
- data/ext/pg_query/include/storage/spin.h +77 -0
- data/ext/pg_query/include/storage/standby.h +91 -0
- data/ext/pg_query/include/storage/standbydefs.h +74 -0
- data/ext/pg_query/include/storage/sync.h +62 -0
- data/ext/pg_query/include/tcop/cmdtag.h +58 -0
- data/ext/pg_query/include/tcop/cmdtaglist.h +217 -0
- data/ext/pg_query/include/tcop/deparse_utility.h +108 -0
- data/ext/pg_query/include/tcop/dest.h +149 -0
- data/ext/pg_query/include/tcop/fastpath.h +21 -0
- data/ext/pg_query/include/tcop/pquery.h +45 -0
- data/ext/pg_query/include/tcop/tcopprot.h +89 -0
- data/ext/pg_query/include/tcop/utility.h +108 -0
- data/ext/pg_query/include/tsearch/ts_cache.h +98 -0
- data/ext/pg_query/include/utils/acl.h +312 -0
- data/ext/pg_query/include/utils/aclchk_internal.h +45 -0
- data/ext/pg_query/include/utils/array.h +458 -0
- data/ext/pg_query/include/utils/builtins.h +127 -0
- data/ext/pg_query/include/utils/bytea.h +27 -0
- data/ext/pg_query/include/utils/catcache.h +231 -0
- data/ext/pg_query/include/utils/date.h +90 -0
- data/ext/pg_query/include/utils/datetime.h +343 -0
- data/ext/pg_query/include/utils/datum.h +68 -0
- data/ext/pg_query/include/utils/dsa.h +123 -0
- data/ext/pg_query/include/utils/dynahash.h +19 -0
- data/ext/pg_query/include/utils/elog.h +439 -0
- data/ext/pg_query/include/utils/errcodes.h +352 -0
- data/ext/pg_query/include/utils/expandeddatum.h +159 -0
- data/ext/pg_query/include/utils/expandedrecord.h +231 -0
- data/ext/pg_query/include/utils/float.h +356 -0
- data/ext/pg_query/include/utils/fmgroids.h +2657 -0
- data/ext/pg_query/include/utils/fmgrprotos.h +2646 -0
- data/ext/pg_query/include/utils/fmgrtab.h +48 -0
- data/ext/pg_query/include/utils/guc.h +443 -0
- data/ext/pg_query/include/utils/guc_tables.h +272 -0
- data/ext/pg_query/include/utils/hsearch.h +149 -0
- data/ext/pg_query/include/utils/inval.h +64 -0
- data/ext/pg_query/include/utils/lsyscache.h +197 -0
- data/ext/pg_query/include/utils/memdebug.h +82 -0
- data/ext/pg_query/include/utils/memutils.h +225 -0
- data/ext/pg_query/include/utils/numeric.h +76 -0
- data/ext/pg_query/include/utils/palloc.h +136 -0
- data/ext/pg_query/include/utils/partcache.h +102 -0
- data/ext/pg_query/include/utils/pg_locale.h +119 -0
- data/ext/pg_query/include/utils/pg_lsn.h +29 -0
- data/ext/pg_query/include/utils/pidfile.h +56 -0
- data/ext/pg_query/include/utils/plancache.h +235 -0
- data/ext/pg_query/include/utils/portal.h +241 -0
- data/ext/pg_query/include/utils/probes.h +114 -0
- data/ext/pg_query/include/utils/ps_status.h +25 -0
- data/ext/pg_query/include/utils/queryenvironment.h +74 -0
- data/ext/pg_query/include/utils/regproc.h +28 -0
- data/ext/pg_query/include/utils/rel.h +644 -0
- data/ext/pg_query/include/utils/relcache.h +151 -0
- data/ext/pg_query/include/utils/reltrigger.h +81 -0
- data/ext/pg_query/include/utils/resowner.h +86 -0
- data/ext/pg_query/include/utils/rls.h +50 -0
- data/ext/pg_query/include/utils/ruleutils.h +44 -0
- data/ext/pg_query/include/utils/sharedtuplestore.h +61 -0
- data/ext/pg_query/include/utils/snapmgr.h +158 -0
- data/ext/pg_query/include/utils/snapshot.h +206 -0
- data/ext/pg_query/include/utils/sortsupport.h +276 -0
- data/ext/pg_query/include/utils/syscache.h +219 -0
- data/ext/pg_query/include/utils/timeout.h +88 -0
- data/ext/pg_query/include/utils/timestamp.h +116 -0
- data/ext/pg_query/include/utils/tuplesort.h +277 -0
- data/ext/pg_query/include/utils/tuplestore.h +91 -0
- data/ext/pg_query/include/utils/typcache.h +202 -0
- data/ext/pg_query/include/utils/tzparser.h +39 -0
- data/ext/pg_query/include/utils/varlena.h +39 -0
- data/ext/pg_query/include/utils/xml.h +84 -0
- data/ext/pg_query/include/xxhash.h +5445 -0
- data/ext/pg_query/include/xxhash/xxhash.h +5445 -0
- data/ext/pg_query/pg_query.c +104 -0
- data/ext/pg_query/pg_query.pb-c.c +37628 -0
- data/ext/pg_query/pg_query_deparse.c +9953 -0
- data/ext/pg_query/pg_query_fingerprint.c +292 -0
- data/ext/pg_query/pg_query_fingerprint.h +8 -0
- data/ext/pg_query/pg_query_internal.h +24 -0
- data/ext/pg_query/pg_query_json_plpgsql.c +738 -0
- data/ext/pg_query/pg_query_json_plpgsql.h +9 -0
- data/ext/pg_query/pg_query_normalize.c +437 -0
- data/ext/pg_query/pg_query_outfuncs.h +10 -0
- data/ext/pg_query/pg_query_outfuncs_json.c +297 -0
- data/ext/pg_query/pg_query_outfuncs_protobuf.c +237 -0
- data/ext/pg_query/pg_query_parse.c +148 -0
- data/ext/pg_query/pg_query_parse_plpgsql.c +460 -0
- data/ext/pg_query/pg_query_readfuncs.h +11 -0
- data/ext/pg_query/pg_query_readfuncs_protobuf.c +142 -0
- data/ext/pg_query/pg_query_ruby.c +108 -12
- data/ext/pg_query/pg_query_scan.c +173 -0
- data/ext/pg_query/pg_query_split.c +221 -0
- data/ext/pg_query/protobuf-c.c +3660 -0
- data/ext/pg_query/src_backend_catalog_namespace.c +1051 -0
- data/ext/pg_query/src_backend_catalog_pg_proc.c +142 -0
- data/ext/pg_query/src_backend_commands_define.c +117 -0
- data/ext/pg_query/src_backend_libpq_pqcomm.c +651 -0
- data/ext/pg_query/src_backend_nodes_bitmapset.c +513 -0
- data/ext/pg_query/src_backend_nodes_copyfuncs.c +6013 -0
- data/ext/pg_query/src_backend_nodes_equalfuncs.c +4003 -0
- data/ext/pg_query/src_backend_nodes_extensible.c +99 -0
- data/ext/pg_query/src_backend_nodes_list.c +922 -0
- data/ext/pg_query/src_backend_nodes_makefuncs.c +417 -0
- data/ext/pg_query/src_backend_nodes_nodeFuncs.c +1363 -0
- data/ext/pg_query/src_backend_nodes_value.c +84 -0
- data/ext/pg_query/src_backend_parser_gram.c +47456 -0
- data/ext/pg_query/src_backend_parser_parse_expr.c +313 -0
- data/ext/pg_query/src_backend_parser_parser.c +497 -0
- data/ext/pg_query/src_backend_parser_scan.c +7091 -0
- data/ext/pg_query/src_backend_parser_scansup.c +160 -0
- data/ext/pg_query/src_backend_postmaster_postmaster.c +2230 -0
- data/ext/pg_query/src_backend_storage_ipc_ipc.c +192 -0
- data/ext/pg_query/src_backend_storage_lmgr_s_lock.c +370 -0
- data/ext/pg_query/src_backend_tcop_postgres.c +776 -0
- data/ext/pg_query/src_backend_utils_adt_datum.c +326 -0
- data/ext/pg_query/src_backend_utils_adt_expandeddatum.c +98 -0
- data/ext/pg_query/src_backend_utils_adt_format_type.c +136 -0
- data/ext/pg_query/src_backend_utils_adt_ruleutils.c +1683 -0
- data/ext/pg_query/src_backend_utils_error_assert.c +74 -0
- data/ext/pg_query/src_backend_utils_error_elog.c +1748 -0
- data/ext/pg_query/src_backend_utils_fmgr_fmgr.c +570 -0
- data/ext/pg_query/src_backend_utils_hash_dynahash.c +1086 -0
- data/ext/pg_query/src_backend_utils_init_globals.c +168 -0
- data/ext/pg_query/src_backend_utils_mb_mbutils.c +839 -0
- data/ext/pg_query/src_backend_utils_misc_guc.c +1831 -0
- data/ext/pg_query/src_backend_utils_mmgr_aset.c +1560 -0
- data/ext/pg_query/src_backend_utils_mmgr_mcxt.c +1006 -0
- data/ext/pg_query/src_common_encnames.c +158 -0
- data/ext/pg_query/src_common_keywords.c +39 -0
- data/ext/pg_query/src_common_kwlist_d.h +1081 -0
- data/ext/pg_query/src_common_kwlookup.c +91 -0
- data/ext/pg_query/src_common_psprintf.c +158 -0
- data/ext/pg_query/src_common_string.c +86 -0
- data/ext/pg_query/src_common_stringinfo.c +336 -0
- data/ext/pg_query/src_common_wchar.c +1651 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_comp.c +1133 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_funcs.c +877 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_gram.c +6533 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_handler.c +107 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_reserved_kwlist_d.h +123 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_scanner.c +671 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_unreserved_kwlist_d.h +255 -0
- data/ext/pg_query/src_port_erand48.c +127 -0
- data/ext/pg_query/src_port_pg_bitutils.c +246 -0
- data/ext/pg_query/src_port_pgsleep.c +69 -0
- data/ext/pg_query/src_port_pgstrcasecmp.c +83 -0
- data/ext/pg_query/src_port_qsort.c +240 -0
- data/ext/pg_query/src_port_random.c +31 -0
- data/ext/pg_query/src_port_snprintf.c +1449 -0
- data/ext/pg_query/src_port_strerror.c +324 -0
- data/ext/pg_query/src_port_strnlen.c +39 -0
- data/ext/pg_query/xxhash.c +43 -0
- data/lib/pg_query.rb +7 -4
- data/lib/pg_query/constants.rb +21 -0
- data/lib/pg_query/deparse.rb +16 -1117
- data/lib/pg_query/filter_columns.rb +86 -85
- data/lib/pg_query/fingerprint.rb +122 -87
- data/lib/pg_query/json_field_names.rb +1402 -0
- data/lib/pg_query/node.rb +31 -0
- data/lib/pg_query/param_refs.rb +42 -37
- data/lib/pg_query/parse.rb +220 -200
- data/lib/pg_query/parse_error.rb +1 -1
- data/lib/pg_query/pg_query_pb.rb +3211 -0
- data/lib/pg_query/scan.rb +23 -0
- data/lib/pg_query/treewalker.rb +24 -40
- data/lib/pg_query/truncate.rb +64 -43
- data/lib/pg_query/version.rb +2 -2
- metadata +473 -11
- data/ext/pg_query/pg_query_ruby.h +0 -10
- data/lib/pg_query/deep_dup.rb +0 -16
- data/lib/pg_query/deparse/alter_table.rb +0 -42
- data/lib/pg_query/deparse/interval.rb +0 -105
- data/lib/pg_query/legacy_parsetree.rb +0 -109
- data/lib/pg_query/node_types.rb +0 -284
@@ -0,0 +1,35 @@
|
|
1
|
+
/*
|
2
|
+
* rmgr.h
|
3
|
+
*
|
4
|
+
* Resource managers definition
|
5
|
+
*
|
6
|
+
* src/include/access/rmgr.h
|
7
|
+
*/
|
8
|
+
#ifndef RMGR_H
|
9
|
+
#define RMGR_H
|
10
|
+
|
11
|
+
typedef uint8 RmgrId;
|
12
|
+
|
13
|
+
/*
|
14
|
+
* Built-in resource managers
|
15
|
+
*
|
16
|
+
* The actual numerical values for each rmgr ID are defined by the order
|
17
|
+
* of entries in rmgrlist.h.
|
18
|
+
*
|
19
|
+
* Note: RM_MAX_ID must fit in RmgrId; widening that type will affect the XLOG
|
20
|
+
* file format.
|
21
|
+
*/
|
22
|
+
#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \
|
23
|
+
symname,
|
24
|
+
|
25
|
+
typedef enum RmgrIds
|
26
|
+
{
|
27
|
+
#include "access/rmgrlist.h"
|
28
|
+
RM_NEXT_ID
|
29
|
+
} RmgrIds;
|
30
|
+
|
31
|
+
#undef PG_RMGR
|
32
|
+
|
33
|
+
#define RM_MAX_ID (RM_NEXT_ID - 1)
|
34
|
+
|
35
|
+
#endif /* RMGR_H */
|
@@ -0,0 +1,49 @@
|
|
1
|
+
/*---------------------------------------------------------------------------
|
2
|
+
* rmgrlist.h
|
3
|
+
*
|
4
|
+
* The resource manager list is kept in its own source file for possible
|
5
|
+
* use by automatic tools. The exact representation of a rmgr is determined
|
6
|
+
* by the PG_RMGR macro, which is not defined in this file; it can be
|
7
|
+
* defined by the caller for special purposes.
|
8
|
+
*
|
9
|
+
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
|
10
|
+
* Portions Copyright (c) 1994, Regents of the University of California
|
11
|
+
*
|
12
|
+
* src/include/access/rmgrlist.h
|
13
|
+
*---------------------------------------------------------------------------
|
14
|
+
*/
|
15
|
+
|
16
|
+
/* there is deliberately not an #ifndef RMGRLIST_H here */
|
17
|
+
|
18
|
+
/*
|
19
|
+
* List of resource manager entries. Note that order of entries defines the
|
20
|
+
* numerical values of each rmgr's ID, which is stored in WAL records. New
|
21
|
+
* entries should be added at the end, to avoid changing IDs of existing
|
22
|
+
* entries.
|
23
|
+
*
|
24
|
+
* Changes to this list possibly need an XLOG_PAGE_MAGIC bump.
|
25
|
+
*/
|
26
|
+
|
27
|
+
/* symbol name, textual name, redo, desc, identify, startup, cleanup */
|
28
|
+
PG_RMGR(RM_XLOG_ID, "XLOG", xlog_redo, xlog_desc, xlog_identify, NULL, NULL, NULL)
|
29
|
+
PG_RMGR(RM_XACT_ID, "Transaction", xact_redo, xact_desc, xact_identify, NULL, NULL, NULL)
|
30
|
+
PG_RMGR(RM_SMGR_ID, "Storage", smgr_redo, smgr_desc, smgr_identify, NULL, NULL, NULL)
|
31
|
+
PG_RMGR(RM_CLOG_ID, "CLOG", clog_redo, clog_desc, clog_identify, NULL, NULL, NULL)
|
32
|
+
PG_RMGR(RM_DBASE_ID, "Database", dbase_redo, dbase_desc, dbase_identify, NULL, NULL, NULL)
|
33
|
+
PG_RMGR(RM_TBLSPC_ID, "Tablespace", tblspc_redo, tblspc_desc, tblspc_identify, NULL, NULL, NULL)
|
34
|
+
PG_RMGR(RM_MULTIXACT_ID, "MultiXact", multixact_redo, multixact_desc, multixact_identify, NULL, NULL, NULL)
|
35
|
+
PG_RMGR(RM_RELMAP_ID, "RelMap", relmap_redo, relmap_desc, relmap_identify, NULL, NULL, NULL)
|
36
|
+
PG_RMGR(RM_STANDBY_ID, "Standby", standby_redo, standby_desc, standby_identify, NULL, NULL, NULL)
|
37
|
+
PG_RMGR(RM_HEAP2_ID, "Heap2", heap2_redo, heap2_desc, heap2_identify, NULL, NULL, heap_mask)
|
38
|
+
PG_RMGR(RM_HEAP_ID, "Heap", heap_redo, heap_desc, heap_identify, NULL, NULL, heap_mask)
|
39
|
+
PG_RMGR(RM_BTREE_ID, "Btree", btree_redo, btree_desc, btree_identify, btree_xlog_startup, btree_xlog_cleanup, btree_mask)
|
40
|
+
PG_RMGR(RM_HASH_ID, "Hash", hash_redo, hash_desc, hash_identify, NULL, NULL, hash_mask)
|
41
|
+
PG_RMGR(RM_GIN_ID, "Gin", gin_redo, gin_desc, gin_identify, gin_xlog_startup, gin_xlog_cleanup, gin_mask)
|
42
|
+
PG_RMGR(RM_GIST_ID, "Gist", gist_redo, gist_desc, gist_identify, gist_xlog_startup, gist_xlog_cleanup, gist_mask)
|
43
|
+
PG_RMGR(RM_SEQ_ID, "Sequence", seq_redo, seq_desc, seq_identify, NULL, NULL, seq_mask)
|
44
|
+
PG_RMGR(RM_SPGIST_ID, "SPGist", spg_redo, spg_desc, spg_identify, spg_xlog_startup, spg_xlog_cleanup, spg_mask)
|
45
|
+
PG_RMGR(RM_BRIN_ID, "BRIN", brin_redo, brin_desc, brin_identify, NULL, NULL, brin_mask)
|
46
|
+
PG_RMGR(RM_COMMIT_TS_ID, "CommitTs", commit_ts_redo, commit_ts_desc, commit_ts_identify, NULL, NULL, NULL)
|
47
|
+
PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, replorigin_identify, NULL, NULL, NULL)
|
48
|
+
PG_RMGR(RM_GENERIC_ID, "Generic", generic_redo, generic_desc, generic_identify, NULL, NULL, generic_mask)
|
49
|
+
PG_RMGR(RM_LOGICALMSG_ID, "LogicalMessage", logicalmsg_redo, logicalmsg_desc, logicalmsg_identify, NULL, NULL, NULL)
|
@@ -0,0 +1,58 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
 *
 * sdir.h
 *	  POSTGRES scan direction definitions.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/sdir.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef SDIR_H
#define SDIR_H


/*
 * ScanDirection was an int8 for no apparent reason. I kept the original
 * values because I'm not sure if I'll break anything otherwise. -ay 2/95
 *
 * The three values are deliberately -1/0/+1 so that the direction can be
 * range-checked (see ScanDirectionIsValid below) and, in callers, used
 * directly as an increment when stepping through items.
 */
typedef enum ScanDirection
{
	BackwardScanDirection = -1,
	NoMovementScanDirection = 0,
	ForwardScanDirection = 1
} ScanDirection;

/*
 * ScanDirectionIsValid
 *		True iff scan direction is valid (one of the three enum values).
 */
#define ScanDirectionIsValid(direction) \
	((bool) (BackwardScanDirection <= (direction) && \
			 (direction) <= ForwardScanDirection))

/*
 * ScanDirectionIsBackward
 *		True iff scan direction is backward.
 */
#define ScanDirectionIsBackward(direction) \
	((bool) ((direction) == BackwardScanDirection))

/*
 * ScanDirectionIsNoMovement
 *		True iff scan direction indicates no movement.
 */
#define ScanDirectionIsNoMovement(direction) \
	((bool) ((direction) == NoMovementScanDirection))

/*
 * ScanDirectionIsForward
 *		True iff scan direction is forward.
 */
#define ScanDirectionIsForward(direction) \
	((bool) ((direction) == ForwardScanDirection))

#endif							/* SDIR_H */
|
@@ -0,0 +1,151 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
 *
 * skey.h
 *	  POSTGRES scan key definitions.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/skey.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef SKEY_H
#define SKEY_H

#include "access/attnum.h"
#include "access/stratnum.h"
#include "fmgr.h"


/*
 * A ScanKey represents the application of a comparison operator between
 * a table or index column and a constant.  When it's part of an array of
 * ScanKeys, the comparison conditions are implicitly ANDed.  The index
 * column is the left argument of the operator, if it's a binary operator.
 * (The data structure can support unary indexable operators too; in that
 * case sk_argument would go unused.  This is not currently implemented.)
 *
 * For an index scan, sk_strategy and sk_subtype must be set correctly for
 * the operator.  When using a ScanKey in a heap scan, these fields are not
 * used and may be set to InvalidStrategy/InvalidOid.
 *
 * If the operator is collation-sensitive, sk_collation must be set
 * correctly as well.
 *
 * A ScanKey can also represent a ScalarArrayOpExpr, that is a condition
 * "column op ANY(ARRAY[...])".  This is signaled by the SK_SEARCHARRAY
 * flag bit.  The sk_argument is not a value of the operator's right-hand
 * argument type, but rather an array of such values, and the per-element
 * comparisons are to be ORed together.
 *
 * A ScanKey can also represent a condition "column IS NULL" or "column
 * IS NOT NULL"; these cases are signaled by the SK_SEARCHNULL and
 * SK_SEARCHNOTNULL flag bits respectively.  The argument is always NULL,
 * and the sk_strategy, sk_subtype, sk_collation, and sk_func fields are
 * not used (unless set by the index AM).
 *
 * SK_SEARCHARRAY, SK_SEARCHNULL and SK_SEARCHNOTNULL are supported only
 * for index scans, not heap scans; and not all index AMs support them,
 * only those that set amsearcharray or amsearchnulls respectively.
 *
 * A ScanKey can also represent an ordering operator invocation, that is
 * an ordering requirement "ORDER BY indexedcol op constant".  This looks
 * the same as a comparison operator, except that the operator doesn't
 * (usually) yield boolean.  We mark such ScanKeys with SK_ORDER_BY.
 * SK_SEARCHARRAY, SK_SEARCHNULL, SK_SEARCHNOTNULL cannot be used here.
 *
 * Note: in some places, ScanKeys are used as a convenient representation
 * for the invocation of an access method support procedure.  In this case
 * sk_strategy/sk_subtype are not meaningful (but sk_collation can be); and
 * sk_func may refer to a function that returns something other than boolean.
 */
typedef struct ScanKeyData
{
	int			sk_flags;		/* flags, see SK_* bits below */
	AttrNumber	sk_attno;		/* table or index column number */
	StrategyNumber sk_strategy; /* operator strategy number */
	Oid			sk_subtype;		/* strategy subtype */
	Oid			sk_collation;	/* collation to use, if needed */
	FmgrInfo	sk_func;		/* lookup info for function to call */
	Datum		sk_argument;	/* data to compare */
} ScanKeyData;

typedef ScanKeyData *ScanKey;

/*
 * About row comparisons:
 *
 * The ScanKey data structure also supports row comparisons, that is ordered
 * tuple comparisons like (x, y) > (c1, c2), having the SQL-spec semantics
 * "x > c1 OR (x = c1 AND y > c2)".  Note that this is currently only
 * implemented for btree index searches, not for heapscans or any other index
 * type.  A row comparison is represented by a "header" ScanKey entry plus
 * a separate array of ScanKeys, one for each column of the row comparison.
 * The header entry has these properties:
 *		sk_flags = SK_ROW_HEADER
 *		sk_attno = index column number for leading column of row comparison
 *		sk_strategy = btree strategy code for semantics of row comparison
 *				(ie, < <= > or >=)
 *		sk_subtype, sk_collation, sk_func: not used
 *		sk_argument: pointer to subsidiary ScanKey array
 * If the header is part of a ScanKey array that's sorted by attno, it
 * must be sorted according to the leading column number.
 *
 * The subsidiary ScanKey array appears in logical column order of the row
 * comparison, which may be different from index column order.  The array
 * elements are like a normal ScanKey array except that:
 *		sk_flags must include SK_ROW_MEMBER, plus SK_ROW_END in the last
 *				element (needed since row header does not include a count)
 *		sk_func points to the btree comparison support function for the
 *				opclass, NOT the operator's implementation function.
 * sk_strategy must be the same in all elements of the subsidiary array,
 * that is, the same as in the header entry.
 * SK_SEARCHARRAY, SK_SEARCHNULL, SK_SEARCHNOTNULL cannot be used here.
 */

/*
 * ScanKeyData sk_flags
 *
 * sk_flags bits 0-15 are reserved for system-wide use (symbols for those
 * bits should be defined here).  Bits 16-31 are reserved for use within
 * individual index access methods.
 */
#define SK_ISNULL			0x0001	/* sk_argument is NULL */
#define SK_UNARY			0x0002	/* unary operator (not supported!) */
#define SK_ROW_HEADER		0x0004	/* row comparison header (see above) */
#define SK_ROW_MEMBER		0x0008	/* row comparison member (see above) */
#define SK_ROW_END			0x0010	/* last row comparison member */
#define SK_SEARCHARRAY		0x0020	/* scankey represents ScalarArrayOp */
#define SK_SEARCHNULL		0x0040	/* scankey represents "col IS NULL" */
#define SK_SEARCHNOTNULL	0x0080	/* scankey represents "col IS NOT NULL" */
#define SK_ORDER_BY			0x0100	/* scankey is for ORDER BY op */


/*
 * prototypes for functions in access/common/scankey.c
 */
extern void ScanKeyInit(ScanKey entry,
						AttrNumber attributeNumber,
						StrategyNumber strategy,
						RegProcedure procedure,
						Datum argument);
extern void ScanKeyEntryInitialize(ScanKey entry,
								   int flags,
								   AttrNumber attributeNumber,
								   StrategyNumber strategy,
								   Oid subtype,
								   Oid collation,
								   RegProcedure procedure,
								   Datum argument);
extern void ScanKeyEntryInitializeWithInfo(ScanKey entry,
										   int flags,
										   AttrNumber attributeNumber,
										   StrategyNumber strategy,
										   Oid subtype,
										   Oid collation,
										   FmgrInfo *finfo,
										   Datum argument);

#endif							/* SKEY_H */
|
@@ -0,0 +1,83 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
 *
 * stratnum.h
 *	  POSTGRES strategy number definitions.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/stratnum.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef STRATNUM_H
#define STRATNUM_H

/*
 * Strategy numbers identify the semantics that particular operators have
 * with respect to particular operator classes.  In some cases a strategy
 * subtype (an OID) is used as further information.
 */
typedef uint16 StrategyNumber;

/* Valid strategy numbers start at 1; 0 marks "no strategy". */
#define InvalidStrategy ((StrategyNumber) 0)

/*
 * Strategy numbers for B-tree indexes.
 */
#define BTLessStrategyNumber			1
#define BTLessEqualStrategyNumber		2
#define BTEqualStrategyNumber			3
#define BTGreaterEqualStrategyNumber	4
#define BTGreaterStrategyNumber			5

#define BTMaxStrategyNumber				5

/*
 * Strategy numbers for hash indexes. There's only one valid strategy for
 * hashing: equality.
 */
#define HTEqualStrategyNumber			1

#define HTMaxStrategyNumber				1

/*
 * Strategy numbers common to (some) GiST, SP-GiST and BRIN opclasses.
 *
 * The first few of these come from the R-Tree indexing method (hence the
 * names); the others have been added over time as they have been needed.
 */
#define RTLeftStrategyNumber			1	/* for << */
#define RTOverLeftStrategyNumber		2	/* for &< */
#define RTOverlapStrategyNumber			3	/* for && */
#define RTOverRightStrategyNumber		4	/* for &> */
#define RTRightStrategyNumber			5	/* for >> */
#define RTSameStrategyNumber			6	/* for ~= */
#define RTContainsStrategyNumber		7	/* for @> */
#define RTContainedByStrategyNumber		8	/* for <@ */
#define RTOverBelowStrategyNumber		9	/* for &<| */
#define RTBelowStrategyNumber			10	/* for <<| */
#define RTAboveStrategyNumber			11	/* for |>> */
#define RTOverAboveStrategyNumber		12	/* for |&> */
#define RTOldContainsStrategyNumber		13	/* for old spelling of @> */
#define RTOldContainedByStrategyNumber	14	/* for old spelling of <@ */
#define RTKNNSearchStrategyNumber		15	/* for <-> (distance) */
#define RTContainsElemStrategyNumber	16	/* for range types @> elem */
#define RTAdjacentStrategyNumber		17	/* for -|- */
#define RTEqualStrategyNumber			18	/* for = */
#define RTNotEqualStrategyNumber		19	/* for != */
#define RTLessStrategyNumber			20	/* for < */
#define RTLessEqualStrategyNumber		21	/* for <= */
#define RTGreaterStrategyNumber			22	/* for > */
#define RTGreaterEqualStrategyNumber	23	/* for >= */
#define RTSubStrategyNumber				24	/* for inet >> */
#define RTSubEqualStrategyNumber		25	/* for inet <<= */
#define RTSuperStrategyNumber			26	/* for inet << */
#define RTSuperEqualStrategyNumber		27	/* for inet >>= */
#define RTPrefixStrategyNumber			28	/* for text ^@ */

#define RTMaxStrategyNumber				28


#endif							/* STRATNUM_H */
|
@@ -0,0 +1,29 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
 *
 * sysattr.h
 *	  POSTGRES system attribute definitions.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/sysattr.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef SYSATTR_H
#define SYSATTR_H


/*
 * Attribute numbers for the system-defined attributes.
 *
 * System attributes use negative attribute numbers so they cannot collide
 * with user columns (which are numbered from 1 upward).
 * FirstLowInvalidHeapAttributeNumber is one less than the lowest valid
 * system attribute number, i.e. it delimits the valid range.
 */
#define SelfItemPointerAttributeNumber			(-1)
#define MinTransactionIdAttributeNumber			(-2)
#define MinCommandIdAttributeNumber				(-3)
#define MaxTransactionIdAttributeNumber			(-4)
#define MaxCommandIdAttributeNumber				(-5)
#define TableOidAttributeNumber					(-6)
#define FirstLowInvalidHeapAttributeNumber		(-7)

#endif							/* SYSATTR_H */
|
@@ -0,0 +1,27 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
 *
 * table.h
 *	  Generic routines for table related code.
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/table.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef TABLE_H
#define TABLE_H

#include "nodes/primnodes.h"	/* for RangeVar */
#include "storage/lockdefs.h"	/* for LOCKMODE */
#include "utils/relcache.h"		/* for Relation */

/* Open a table by OID, acquiring the given lock mode. */
extern Relation table_open(Oid relationId, LOCKMODE lockmode);
/* Open a table by RangeVar (possibly schema-qualified name). */
extern Relation table_openrv(const RangeVar *relation, LOCKMODE lockmode);
/* As table_openrv, but with missing_ok the relation may be absent —
 * NOTE(review): presumably returns NULL in that case; confirm in table.c. */
extern Relation table_openrv_extended(const RangeVar *relation,
									  LOCKMODE lockmode, bool missing_ok);
/* Close a table previously opened by one of the functions above. */
extern void table_close(Relation relation, LOCKMODE lockmode);

#endif							/* TABLE_H */
|
@@ -0,0 +1,1825 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
|
2
|
+
*
|
3
|
+
* tableam.h
|
4
|
+
* POSTGRES table access method definitions.
|
5
|
+
*
|
6
|
+
*
|
7
|
+
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
|
8
|
+
* Portions Copyright (c) 1994, Regents of the University of California
|
9
|
+
*
|
10
|
+
* src/include/access/tableam.h
|
11
|
+
*
|
12
|
+
* NOTES
|
13
|
+
* See tableam.sgml for higher level documentation.
|
14
|
+
*
|
15
|
+
*-------------------------------------------------------------------------
|
16
|
+
*/
|
17
|
+
#ifndef TABLEAM_H
|
18
|
+
#define TABLEAM_H
|
19
|
+
|
20
|
+
#include "access/relscan.h"
|
21
|
+
#include "access/sdir.h"
|
22
|
+
#include "utils/guc.h"
|
23
|
+
#include "utils/rel.h"
|
24
|
+
#include "utils/snapshot.h"
|
25
|
+
|
26
|
+
|
27
|
+
#define DEFAULT_TABLE_ACCESS_METHOD "heap"
|
28
|
+
|
29
|
+
/* GUCs */
|
30
|
+
extern char *default_table_access_method;
|
31
|
+
extern bool synchronize_seqscans;
|
32
|
+
|
33
|
+
|
34
|
+
struct BulkInsertStateData;
|
35
|
+
struct IndexInfo;
|
36
|
+
struct SampleScanState;
|
37
|
+
struct TBMIterateResult;
|
38
|
+
struct VacuumParams;
|
39
|
+
struct ValidateIndexState;
|
40
|
+
|
41
|
+
/*
|
42
|
+
* Bitmask values for the flags argument to the scan_begin callback.
|
43
|
+
*/
|
44
|
+
typedef enum ScanOptions
|
45
|
+
{
|
46
|
+
/* one of SO_TYPE_* may be specified */
|
47
|
+
SO_TYPE_SEQSCAN = 1 << 0,
|
48
|
+
SO_TYPE_BITMAPSCAN = 1 << 1,
|
49
|
+
SO_TYPE_SAMPLESCAN = 1 << 2,
|
50
|
+
SO_TYPE_TIDSCAN = 1 << 3,
|
51
|
+
SO_TYPE_ANALYZE = 1 << 4,
|
52
|
+
|
53
|
+
/* several of SO_ALLOW_* may be specified */
|
54
|
+
/* allow or disallow use of access strategy */
|
55
|
+
SO_ALLOW_STRAT = 1 << 5,
|
56
|
+
/* report location to syncscan logic? */
|
57
|
+
SO_ALLOW_SYNC = 1 << 6,
|
58
|
+
/* verify visibility page-at-a-time? */
|
59
|
+
SO_ALLOW_PAGEMODE = 1 << 7,
|
60
|
+
|
61
|
+
/* unregister snapshot at scan end? */
|
62
|
+
SO_TEMP_SNAPSHOT = 1 << 8
|
63
|
+
} ScanOptions;
|
64
|
+
|
65
|
+
/*
|
66
|
+
* Result codes for table_{update,delete,lock_tuple}, and for visibility
|
67
|
+
* routines inside table AMs.
|
68
|
+
*/
|
69
|
+
typedef enum TM_Result
|
70
|
+
{
|
71
|
+
/*
|
72
|
+
* Signals that the action succeeded (i.e. update/delete performed, lock
|
73
|
+
* was acquired)
|
74
|
+
*/
|
75
|
+
TM_Ok,
|
76
|
+
|
77
|
+
/* The affected tuple wasn't visible to the relevant snapshot */
|
78
|
+
TM_Invisible,
|
79
|
+
|
80
|
+
/* The affected tuple was already modified by the calling backend */
|
81
|
+
TM_SelfModified,
|
82
|
+
|
83
|
+
/*
|
84
|
+
* The affected tuple was updated by another transaction. This includes
|
85
|
+
* the case where tuple was moved to another partition.
|
86
|
+
*/
|
87
|
+
TM_Updated,
|
88
|
+
|
89
|
+
/* The affected tuple was deleted by another transaction */
|
90
|
+
TM_Deleted,
|
91
|
+
|
92
|
+
/*
|
93
|
+
* The affected tuple is currently being modified by another session. This
|
94
|
+
* will only be returned if table_(update/delete/lock_tuple) are
|
95
|
+
* instructed not to wait.
|
96
|
+
*/
|
97
|
+
TM_BeingModified,
|
98
|
+
|
99
|
+
/* lock couldn't be acquired, action skipped. Only used by lock_tuple */
|
100
|
+
TM_WouldBlock
|
101
|
+
} TM_Result;
|
102
|
+
|
103
|
+
/*
|
104
|
+
* When table_tuple_update, table_tuple_delete, or table_tuple_lock fail
|
105
|
+
* because the target tuple is already outdated, they fill in this struct to
|
106
|
+
* provide information to the caller about what happened.
|
107
|
+
*
|
108
|
+
* ctid is the target's ctid link: it is the same as the target's TID if the
|
109
|
+
* target was deleted, or the location of the replacement tuple if the target
|
110
|
+
* was updated.
|
111
|
+
*
|
112
|
+
* xmax is the outdating transaction's XID. If the caller wants to visit the
|
113
|
+
* replacement tuple, it must check that this matches before believing the
|
114
|
+
* replacement is really a match.
|
115
|
+
*
|
116
|
+
* cmax is the outdating command's CID, but only when the failure code is
|
117
|
+
* TM_SelfModified (i.e., something in the current transaction outdated the
|
118
|
+
* tuple); otherwise cmax is zero. (We make this restriction because
|
119
|
+
* HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
|
120
|
+
* transactions.)
|
121
|
+
*/
|
122
|
+
typedef struct TM_FailureData
|
123
|
+
{
|
124
|
+
ItemPointerData ctid;
|
125
|
+
TransactionId xmax;
|
126
|
+
CommandId cmax;
|
127
|
+
bool traversed;
|
128
|
+
} TM_FailureData;
|
129
|
+
|
130
|
+
/* "options" flag bits for table_tuple_insert */
|
131
|
+
/* TABLE_INSERT_SKIP_WAL was 0x0001; RelationNeedsWAL() now governs */
|
132
|
+
#define TABLE_INSERT_SKIP_FSM 0x0002
|
133
|
+
#define TABLE_INSERT_FROZEN 0x0004
|
134
|
+
#define TABLE_INSERT_NO_LOGICAL 0x0008
|
135
|
+
|
136
|
+
/* flag bits for table_tuple_lock */
|
137
|
+
/* Follow tuples whose update is in progress if lock modes don't conflict */
|
138
|
+
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0)
|
139
|
+
/* Follow update chain and lock latest version of tuple */
|
140
|
+
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION (1 << 1)
|
141
|
+
|
142
|
+
|
143
|
+
/* Typedef for callback function for table_index_build_scan */
|
144
|
+
typedef void (*IndexBuildCallback) (Relation index,
|
145
|
+
ItemPointer tid,
|
146
|
+
Datum *values,
|
147
|
+
bool *isnull,
|
148
|
+
bool tupleIsAlive,
|
149
|
+
void *state);
|
150
|
+
|
151
|
+
/*
|
152
|
+
* API struct for a table AM. Note this must be allocated in a
|
153
|
+
* server-lifetime manner, typically as a static const struct, which then gets
|
154
|
+
* returned by FormData_pg_am.amhandler.
|
155
|
+
*
|
156
|
+
* In most cases it's not appropriate to call the callbacks directly, use the
|
157
|
+
* table_* wrapper functions instead.
|
158
|
+
*
|
159
|
+
* GetTableAmRoutine() asserts that required callbacks are filled in, remember
|
160
|
+
* to update when adding a callback.
|
161
|
+
*/
|
162
|
+
typedef struct TableAmRoutine
|
163
|
+
{
|
164
|
+
/* this must be set to T_TableAmRoutine */
|
165
|
+
NodeTag type;
|
166
|
+
|
167
|
+
|
168
|
+
/* ------------------------------------------------------------------------
|
169
|
+
* Slot related callbacks.
|
170
|
+
* ------------------------------------------------------------------------
|
171
|
+
*/
|
172
|
+
|
173
|
+
/*
|
174
|
+
* Return slot implementation suitable for storing a tuple of this AM.
|
175
|
+
*/
|
176
|
+
const TupleTableSlotOps *(*slot_callbacks) (Relation rel);
|
177
|
+
|
178
|
+
|
179
|
+
/* ------------------------------------------------------------------------
|
180
|
+
* Table scan callbacks.
|
181
|
+
* ------------------------------------------------------------------------
|
182
|
+
*/
|
183
|
+
|
184
|
+
/*
|
185
|
+
* Start a scan of `rel`. The callback has to return a TableScanDesc,
|
186
|
+
* which will typically be embedded in a larger, AM specific, struct.
|
187
|
+
*
|
188
|
+
* If nkeys != 0, the results need to be filtered by those scan keys.
|
189
|
+
*
|
190
|
+
* pscan, if not NULL, will have already been initialized with
|
191
|
+
* parallelscan_initialize(), and has to be for the same relation. Will
|
192
|
+
* only be set coming from table_beginscan_parallel().
|
193
|
+
*
|
194
|
+
* `flags` is a bitmask indicating the type of scan (ScanOptions's
|
195
|
+
* SO_TYPE_*, currently only one may be specified), options controlling
|
196
|
+
* the scan's behaviour (ScanOptions's SO_ALLOW_*, several may be
|
197
|
+
* specified, an AM may ignore unsupported ones) and whether the snapshot
|
198
|
+
* needs to be deallocated at scan_end (ScanOptions's SO_TEMP_SNAPSHOT).
|
199
|
+
*/
|
200
|
+
TableScanDesc (*scan_begin) (Relation rel,
|
201
|
+
Snapshot snapshot,
|
202
|
+
int nkeys, struct ScanKeyData *key,
|
203
|
+
ParallelTableScanDesc pscan,
|
204
|
+
uint32 flags);
|
205
|
+
|
206
|
+
/*
|
207
|
+
* Release resources and deallocate scan. If TableScanDesc.temp_snap,
|
208
|
+
* TableScanDesc.rs_snapshot needs to be unregistered.
|
209
|
+
*/
|
210
|
+
void (*scan_end) (TableScanDesc scan);
|
211
|
+
|
212
|
+
/*
|
213
|
+
* Restart relation scan. If set_params is set to true, allow_{strat,
|
214
|
+
* sync, pagemode} (see scan_begin) changes should be taken into account.
|
215
|
+
*/
|
216
|
+
void (*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key,
|
217
|
+
bool set_params, bool allow_strat,
|
218
|
+
bool allow_sync, bool allow_pagemode);
|
219
|
+
|
220
|
+
/*
|
221
|
+
* Return next tuple from `scan`, store in slot.
|
222
|
+
*/
|
223
|
+
bool (*scan_getnextslot) (TableScanDesc scan,
|
224
|
+
ScanDirection direction,
|
225
|
+
TupleTableSlot *slot);
|
226
|
+
|
227
|
+
|
228
|
+
/* ------------------------------------------------------------------------
|
229
|
+
* Parallel table scan related functions.
|
230
|
+
* ------------------------------------------------------------------------
|
231
|
+
*/
|
232
|
+
|
233
|
+
/*
|
234
|
+
* Estimate the size of shared memory needed for a parallel scan of this
|
235
|
+
* relation. The snapshot does not need to be accounted for.
|
236
|
+
*/
|
237
|
+
Size (*parallelscan_estimate) (Relation rel);
|
238
|
+
|
239
|
+
/*
|
240
|
+
* Initialize ParallelTableScanDesc for a parallel scan of this relation.
|
241
|
+
* `pscan` will be sized according to parallelscan_estimate() for the same
|
242
|
+
* relation.
|
243
|
+
*/
|
244
|
+
Size (*parallelscan_initialize) (Relation rel,
|
245
|
+
ParallelTableScanDesc pscan);
|
246
|
+
|
247
|
+
/*
|
248
|
+
* Reinitialize `pscan` for a new scan. `rel` will be the same relation as
|
249
|
+
* when `pscan` was initialized by parallelscan_initialize.
|
250
|
+
*/
|
251
|
+
void (*parallelscan_reinitialize) (Relation rel,
|
252
|
+
ParallelTableScanDesc pscan);
|
253
|
+
|
254
|
+
|
255
|
+
/* ------------------------------------------------------------------------
|
256
|
+
* Index Scan Callbacks
|
257
|
+
* ------------------------------------------------------------------------
|
258
|
+
*/
|
259
|
+
|
260
|
+
/*
|
261
|
+
* Prepare to fetch tuples from the relation, as needed when fetching
|
262
|
+
* tuples for an index scan. The callback has to return an
|
263
|
+
* IndexFetchTableData, which the AM will typically embed in a larger
|
264
|
+
* structure with additional information.
|
265
|
+
*
|
266
|
+
* Tuples for an index scan can then be fetched via index_fetch_tuple.
|
267
|
+
*/
|
268
|
+
struct IndexFetchTableData *(*index_fetch_begin) (Relation rel);
|
269
|
+
|
270
|
+
/*
|
271
|
+
* Reset index fetch. Typically this will release cross index fetch
|
272
|
+
* resources held in IndexFetchTableData.
|
273
|
+
*/
|
274
|
+
void (*index_fetch_reset) (struct IndexFetchTableData *data);
|
275
|
+
|
276
|
+
/*
|
277
|
+
* Release resources and deallocate index fetch.
|
278
|
+
*/
|
279
|
+
void (*index_fetch_end) (struct IndexFetchTableData *data);
|
280
|
+
|
281
|
+
/*
|
282
|
+
* Fetch tuple at `tid` into `slot`, after doing a visibility test
|
283
|
+
* according to `snapshot`. If a tuple was found and passed the visibility
|
284
|
+
* test, return true, false otherwise.
|
285
|
+
*
|
286
|
+
* Note that AMs that do not necessarily update indexes when indexed
|
287
|
+
* columns do not change, need to return the current/correct version of
|
288
|
+
* the tuple that is visible to the snapshot, even if the tid points to an
|
289
|
+
* older version of the tuple.
|
290
|
+
*
|
291
|
+
* *call_again is false on the first call to index_fetch_tuple for a tid.
|
292
|
+
* If there potentially is another tuple matching the tid, *call_again
|
293
|
+
* needs to be set to true by index_fetch_tuple, signaling to the caller
|
294
|
+
* that index_fetch_tuple should be called again for the same tid.
|
295
|
+
*
|
296
|
+
* *all_dead, if all_dead is not NULL, should be set to true by
|
297
|
+
* index_fetch_tuple iff it is guaranteed that no backend needs to see
|
298
|
+
* that tuple. Index AMs can use that to avoid returning that tid in
|
299
|
+
* future searches.
|
300
|
+
*/
|
301
|
+
bool (*index_fetch_tuple) (struct IndexFetchTableData *scan,
|
302
|
+
ItemPointer tid,
|
303
|
+
Snapshot snapshot,
|
304
|
+
TupleTableSlot *slot,
|
305
|
+
bool *call_again, bool *all_dead);
|
306
|
+
|
307
|
+
|
308
|
+
/* ------------------------------------------------------------------------
|
309
|
+
* Callbacks for non-modifying operations on individual tuples
|
310
|
+
* ------------------------------------------------------------------------
|
311
|
+
*/
|
312
|
+
|
313
|
+
/*
|
314
|
+
* Fetch tuple at `tid` into `slot`, after doing a visibility test
|
315
|
+
* according to `snapshot`. If a tuple was found and passed the visibility
|
316
|
+
* test, returns true, false otherwise.
|
317
|
+
*/
|
318
|
+
bool (*tuple_fetch_row_version) (Relation rel,
|
319
|
+
ItemPointer tid,
|
320
|
+
Snapshot snapshot,
|
321
|
+
TupleTableSlot *slot);
|
322
|
+
|
323
|
+
/*
|
324
|
+
* Is tid valid for a scan of this relation.
|
325
|
+
*/
|
326
|
+
bool (*tuple_tid_valid) (TableScanDesc scan,
|
327
|
+
ItemPointer tid);
|
328
|
+
|
329
|
+
/*
|
330
|
+
* Return the latest version of the tuple at `tid`, by updating `tid` to
|
331
|
+
* point at the newest version.
|
332
|
+
*/
|
333
|
+
void (*tuple_get_latest_tid) (TableScanDesc scan,
|
334
|
+
ItemPointer tid);
|
335
|
+
|
336
|
+
/*
|
337
|
+
* Does the tuple in `slot` satisfy `snapshot`? The slot needs to be of
|
338
|
+
* the appropriate type for the AM.
|
339
|
+
*/
|
340
|
+
bool (*tuple_satisfies_snapshot) (Relation rel,
|
341
|
+
TupleTableSlot *slot,
|
342
|
+
Snapshot snapshot);
|
343
|
+
|
344
|
+
/* see table_compute_xid_horizon_for_tuples() */
|
345
|
+
TransactionId (*compute_xid_horizon_for_tuples) (Relation rel,
|
346
|
+
ItemPointerData *items,
|
347
|
+
int nitems);
|
348
|
+
|
349
|
+
|
350
|
+
/* ------------------------------------------------------------------------
|
351
|
+
* Manipulations of physical tuples.
|
352
|
+
* ------------------------------------------------------------------------
|
353
|
+
*/
|
354
|
+
|
355
|
+
/* see table_tuple_insert() for reference about parameters */
|
356
|
+
void (*tuple_insert) (Relation rel, TupleTableSlot *slot,
|
357
|
+
CommandId cid, int options,
|
358
|
+
struct BulkInsertStateData *bistate);
|
359
|
+
|
360
|
+
/* see table_tuple_insert_speculative() for reference about parameters */
|
361
|
+
void (*tuple_insert_speculative) (Relation rel,
|
362
|
+
TupleTableSlot *slot,
|
363
|
+
CommandId cid,
|
364
|
+
int options,
|
365
|
+
struct BulkInsertStateData *bistate,
|
366
|
+
uint32 specToken);
|
367
|
+
|
368
|
+
/* see table_tuple_complete_speculative() for reference about parameters */
|
369
|
+
void (*tuple_complete_speculative) (Relation rel,
|
370
|
+
TupleTableSlot *slot,
|
371
|
+
uint32 specToken,
|
372
|
+
bool succeeded);
|
373
|
+
|
374
|
+
/* see table_multi_insert() for reference about parameters */
|
375
|
+
void (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
|
376
|
+
CommandId cid, int options, struct BulkInsertStateData *bistate);
|
377
|
+
|
378
|
+
/* see table_tuple_delete() for reference about parameters */
|
379
|
+
TM_Result (*tuple_delete) (Relation rel,
|
380
|
+
ItemPointer tid,
|
381
|
+
CommandId cid,
|
382
|
+
Snapshot snapshot,
|
383
|
+
Snapshot crosscheck,
|
384
|
+
bool wait,
|
385
|
+
TM_FailureData *tmfd,
|
386
|
+
bool changingPart);
|
387
|
+
|
388
|
+
/* see table_tuple_update() for reference about parameters */
|
389
|
+
TM_Result (*tuple_update) (Relation rel,
|
390
|
+
ItemPointer otid,
|
391
|
+
TupleTableSlot *slot,
|
392
|
+
CommandId cid,
|
393
|
+
Snapshot snapshot,
|
394
|
+
Snapshot crosscheck,
|
395
|
+
bool wait,
|
396
|
+
TM_FailureData *tmfd,
|
397
|
+
LockTupleMode *lockmode,
|
398
|
+
bool *update_indexes);
|
399
|
+
|
400
|
+
/* see table_tuple_lock() for reference about parameters */
|
401
|
+
TM_Result (*tuple_lock) (Relation rel,
|
402
|
+
ItemPointer tid,
|
403
|
+
Snapshot snapshot,
|
404
|
+
TupleTableSlot *slot,
|
405
|
+
CommandId cid,
|
406
|
+
LockTupleMode mode,
|
407
|
+
LockWaitPolicy wait_policy,
|
408
|
+
uint8 flags,
|
409
|
+
TM_FailureData *tmfd);
|
410
|
+
|
411
|
+
/*
|
412
|
+
* Perform operations necessary to complete insertions made via
|
413
|
+
* tuple_insert and multi_insert with a BulkInsertState specified. In-tree
|
414
|
+
* access methods ceased to use this.
|
415
|
+
*
|
416
|
+
* Typically callers of tuple_insert and multi_insert will just pass all
|
417
|
+
* the flags that apply to them, and each AM has to decide which of them
|
418
|
+
* make sense for it, and then only take actions in finish_bulk_insert for
|
419
|
+
* those flags, and ignore others.
|
420
|
+
*
|
421
|
+
* Optional callback.
|
422
|
+
*/
|
423
|
+
void (*finish_bulk_insert) (Relation rel, int options);
|
424
|
+
|
425
|
+
|
426
|
+
/* ------------------------------------------------------------------------
|
427
|
+
* DDL related functionality.
|
428
|
+
* ------------------------------------------------------------------------
|
429
|
+
*/
|
430
|
+
|
431
|
+
/*
|
432
|
+
* This callback needs to create a new relation filenode for `rel`, with
|
433
|
+
* appropriate durability behaviour for `persistence`.
|
434
|
+
*
|
435
|
+
* Note that only the subset of the relcache filled by
|
436
|
+
* RelationBuildLocalRelation() can be relied upon and that the relation's
|
437
|
+
* catalog entries will either not yet exist (new relation), or will still
|
438
|
+
* reference the old relfilenode.
|
439
|
+
*
|
440
|
+
* As output *freezeXid, *minmulti must be set to the values appropriate
|
441
|
+
* for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those
|
442
|
+
* fields to be filled they can be set to InvalidTransactionId and
|
443
|
+
* InvalidMultiXactId, respectively.
|
444
|
+
*
|
445
|
+
* See also table_relation_set_new_filenode().
|
446
|
+
*/
|
447
|
+
void (*relation_set_new_filenode) (Relation rel,
|
448
|
+
const RelFileNode *newrnode,
|
449
|
+
char persistence,
|
450
|
+
TransactionId *freezeXid,
|
451
|
+
MultiXactId *minmulti);
|
452
|
+
|
453
|
+
/*
|
454
|
+
* This callback needs to remove all contents from `rel`'s current
|
455
|
+
* relfilenode. No provisions for transactional behaviour need to be made.
|
456
|
+
* Often this can be implemented by truncating the underlying storage to
|
457
|
+
* its minimal size.
|
458
|
+
*
|
459
|
+
* See also table_relation_nontransactional_truncate().
|
460
|
+
*/
|
461
|
+
void (*relation_nontransactional_truncate) (Relation rel);
|
462
|
+
|
463
|
+
/*
|
464
|
+
* See table_relation_copy_data().
|
465
|
+
*
|
466
|
+
* This can typically be implemented by directly copying the underlying
|
467
|
+
* storage, unless it contains references to the tablespace internally.
|
468
|
+
*/
|
469
|
+
void (*relation_copy_data) (Relation rel,
|
470
|
+
const RelFileNode *newrnode);
|
471
|
+
|
472
|
+
/* See table_relation_copy_for_cluster() */
|
473
|
+
void (*relation_copy_for_cluster) (Relation NewTable,
|
474
|
+
Relation OldTable,
|
475
|
+
Relation OldIndex,
|
476
|
+
bool use_sort,
|
477
|
+
TransactionId OldestXmin,
|
478
|
+
TransactionId *xid_cutoff,
|
479
|
+
MultiXactId *multi_cutoff,
|
480
|
+
double *num_tuples,
|
481
|
+
double *tups_vacuumed,
|
482
|
+
double *tups_recently_dead);
|
483
|
+
|
484
|
+
/*
|
485
|
+
* React to VACUUM command on the relation. The VACUUM can be triggered by
|
486
|
+
* a user or by autovacuum. The specific actions performed by the AM will
|
487
|
+
* depend heavily on the individual AM.
|
488
|
+
*
|
489
|
+
* On entry a transaction is already established, and the relation is
|
490
|
+
* locked with a ShareUpdateExclusive lock.
|
491
|
+
*
|
492
|
+
* Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through
|
493
|
+
* this routine, even if (for ANALYZE) it is part of the same VACUUM
|
494
|
+
* command.
|
495
|
+
*
|
496
|
+
* There probably, in the future, needs to be a separate callback to
|
497
|
+
* integrate with autovacuum's scheduling.
|
498
|
+
*/
|
499
|
+
void (*relation_vacuum) (Relation onerel,
|
500
|
+
struct VacuumParams *params,
|
501
|
+
BufferAccessStrategy bstrategy);
|
502
|
+
|
503
|
+
/*
|
504
|
+
* Prepare to analyze block `blockno` of `scan`. The scan has been started
|
505
|
+
* with table_beginscan_analyze(). See also
|
506
|
+
* table_scan_analyze_next_block().
|
507
|
+
*
|
508
|
+
* The callback may acquire resources like locks that are held until
|
509
|
+
* table_scan_analyze_next_tuple() returns false. It e.g. can make sense
|
510
|
+
* to hold a lock until all tuples on a block have been analyzed by
|
511
|
+
* scan_analyze_next_tuple.
|
512
|
+
*
|
513
|
+
* The callback can return false if the block is not suitable for
|
514
|
+
* sampling, e.g. because it's a metapage that could never contain tuples.
|
515
|
+
*
|
516
|
+
* XXX: This obviously is primarily suited for block-based AMs. It's not
|
517
|
+
* clear what a good interface for non block based AMs would be, so there
|
518
|
+
* isn't one yet.
|
519
|
+
*/
|
520
|
+
bool (*scan_analyze_next_block) (TableScanDesc scan,
|
521
|
+
BlockNumber blockno,
|
522
|
+
BufferAccessStrategy bstrategy);
|
523
|
+
|
524
|
+
/*
|
525
|
+
* See table_scan_analyze_next_tuple().
|
526
|
+
*
|
527
|
+
* Not every AM might have a meaningful concept of dead rows, in which
|
528
|
+
* case it's OK to not increment *deadrows - but note that that may
|
529
|
+
* influence autovacuum scheduling (see comment for relation_vacuum
|
530
|
+
* callback).
|
531
|
+
*/
|
532
|
+
bool (*scan_analyze_next_tuple) (TableScanDesc scan,
|
533
|
+
TransactionId OldestXmin,
|
534
|
+
double *liverows,
|
535
|
+
double *deadrows,
|
536
|
+
TupleTableSlot *slot);
|
537
|
+
|
538
|
+
/* see table_index_build_range_scan for reference about parameters */
|
539
|
+
double (*index_build_range_scan) (Relation table_rel,
|
540
|
+
Relation index_rel,
|
541
|
+
struct IndexInfo *index_info,
|
542
|
+
bool allow_sync,
|
543
|
+
bool anyvisible,
|
544
|
+
bool progress,
|
545
|
+
BlockNumber start_blockno,
|
546
|
+
BlockNumber numblocks,
|
547
|
+
IndexBuildCallback callback,
|
548
|
+
void *callback_state,
|
549
|
+
TableScanDesc scan);
|
550
|
+
|
551
|
+
/* see table_index_validate_scan for reference about parameters */
|
552
|
+
void (*index_validate_scan) (Relation table_rel,
|
553
|
+
Relation index_rel,
|
554
|
+
struct IndexInfo *index_info,
|
555
|
+
Snapshot snapshot,
|
556
|
+
struct ValidateIndexState *state);
|
557
|
+
|
558
|
+
|
559
|
+
/* ------------------------------------------------------------------------
|
560
|
+
* Miscellaneous functions.
|
561
|
+
* ------------------------------------------------------------------------
|
562
|
+
*/
|
563
|
+
|
564
|
+
/*
|
565
|
+
* See table_relation_size().
|
566
|
+
*
|
567
|
+
* Note that currently a few callers use the MAIN_FORKNUM size to figure
|
568
|
+
* out the range of potentially interesting blocks (brin, analyze). It's
|
569
|
+
* probable that we'll need to revise the interface for those at some
|
570
|
+
* point.
|
571
|
+
*/
|
572
|
+
uint64 (*relation_size) (Relation rel, ForkNumber forkNumber);
|
573
|
+
|
574
|
+
|
575
|
+
/*
|
576
|
+
* This callback should return true if the relation requires a TOAST table
|
577
|
+
* and false if it does not. It may wish to examine the relation's tuple
|
578
|
+
* descriptor before making a decision, but if it uses some other method
|
579
|
+
* of storing large values (or if it does not support them) it can simply
|
580
|
+
* return false.
|
581
|
+
*/
|
582
|
+
bool (*relation_needs_toast_table) (Relation rel);
|
583
|
+
|
584
|
+
/*
|
585
|
+
* This callback should return the OID of the table AM that implements
|
586
|
+
* TOAST tables for this AM. If the relation_needs_toast_table callback
|
587
|
+
* always returns false, this callback is not required.
|
588
|
+
*/
|
589
|
+
Oid (*relation_toast_am) (Relation rel);
|
590
|
+
|
591
|
+
/*
|
592
|
+
* This callback is invoked when detoasting a value stored in a toast
|
593
|
+
* table implemented by this AM. See table_relation_fetch_toast_slice()
|
594
|
+
* for more details.
|
595
|
+
*/
|
596
|
+
void (*relation_fetch_toast_slice) (Relation toastrel, Oid valueid,
|
597
|
+
int32 attrsize,
|
598
|
+
int32 sliceoffset,
|
599
|
+
int32 slicelength,
|
600
|
+
struct varlena *result);
|
601
|
+
|
602
|
+
|
603
|
+
/* ------------------------------------------------------------------------
|
604
|
+
* Planner related functions.
|
605
|
+
* ------------------------------------------------------------------------
|
606
|
+
*/
|
607
|
+
|
608
|
+
/*
|
609
|
+
* See table_relation_estimate_size().
|
610
|
+
*
|
611
|
+
* While block oriented, it shouldn't be too hard for an AM that doesn't
|
612
|
+
* internally use blocks to convert into a usable representation.
|
613
|
+
*
|
614
|
+
* This differs from the relation_size callback by returning size
|
615
|
+
* estimates (both relation size and tuple count) for planning purposes,
|
616
|
+
* rather than returning a currently correct estimate.
|
617
|
+
*/
|
618
|
+
void (*relation_estimate_size) (Relation rel, int32 *attr_widths,
|
619
|
+
BlockNumber *pages, double *tuples,
|
620
|
+
double *allvisfrac);
|
621
|
+
|
622
|
+
|
623
|
+
/* ------------------------------------------------------------------------
|
624
|
+
* Executor related functions.
|
625
|
+
* ------------------------------------------------------------------------
|
626
|
+
*/
|
627
|
+
|
628
|
+
/*
|
629
|
+
* Prepare to fetch / check / return tuples from `tbmres->blockno` as part
|
630
|
+
* of a bitmap table scan. `scan` was started via table_beginscan_bm().
|
631
|
+
* Return false if there are no tuples to be found on the page, true
|
632
|
+
* otherwise.
|
633
|
+
*
|
634
|
+
* This will typically read and pin the target block, and do the necessary
|
635
|
+
* work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
|
636
|
+
* make sense to perform tuple visibility checks at this time). For some
|
637
|
+
* AMs it will make more sense to do all the work referencing `tbmres`
|
638
|
+
* contents here, for others it might be better to defer more work to
|
639
|
+
* scan_bitmap_next_tuple.
|
640
|
+
*
|
641
|
+
* If `tbmres->blockno` is -1, this is a lossy scan and all visible tuples
|
642
|
+
* on the page have to be returned, otherwise the tuples at offsets in
|
643
|
+
* `tbmres->offsets` need to be returned.
|
644
|
+
*
|
645
|
+
* XXX: Currently this may only be implemented if the AM uses md.c as its
|
646
|
+
* storage manager, and uses ItemPointer->ip_blkid in a manner that maps
|
647
|
+
* blockids directly to the underlying storage. nodeBitmapHeapscan.c
|
648
|
+
* performs prefetching directly using that interface. This probably
|
649
|
+
* needs to be rectified at a later point.
|
650
|
+
*
|
651
|
+
* XXX: Currently this may only be implemented if the AM uses the
|
652
|
+
* visibilitymap, as nodeBitmapHeapscan.c unconditionally accesses it to
|
653
|
+
* perform prefetching. This probably needs to be rectified at a later
|
654
|
+
* point.
|
655
|
+
*
|
656
|
+
* Optional callback, but either both scan_bitmap_next_block and
|
657
|
+
* scan_bitmap_next_tuple need to exist, or neither.
|
658
|
+
*/
|
659
|
+
bool (*scan_bitmap_next_block) (TableScanDesc scan,
|
660
|
+
struct TBMIterateResult *tbmres);
|
661
|
+
|
662
|
+
/*
|
663
|
+
* Fetch the next tuple of a bitmap table scan into `slot` and return true
|
664
|
+
* if a visible tuple was found, false otherwise.
|
665
|
+
*
|
666
|
+
* For some AMs it will make more sense to do all the work referencing
|
667
|
+
* `tbmres` contents in scan_bitmap_next_block, for others it might be
|
668
|
+
* better to defer more work to this callback.
|
669
|
+
*
|
670
|
+
* Optional callback, but either both scan_bitmap_next_block and
|
671
|
+
* scan_bitmap_next_tuple need to exist, or neither.
|
672
|
+
*/
|
673
|
+
bool (*scan_bitmap_next_tuple) (TableScanDesc scan,
|
674
|
+
struct TBMIterateResult *tbmres,
|
675
|
+
TupleTableSlot *slot);
|
676
|
+
|
677
|
+
/*
|
678
|
+
* Prepare to fetch tuples from the next block in a sample scan. Return
|
679
|
+
* false if the sample scan is finished, true otherwise. `scan` was
|
680
|
+
* started via table_beginscan_sampling().
|
681
|
+
*
|
682
|
+
* Typically this will first determine the target block by calling the
|
683
|
+
* TsmRoutine's NextSampleBlock() callback if not NULL, or alternatively
|
684
|
+
* perform a sequential scan over all blocks. The determined block is
|
685
|
+
* then typically read and pinned.
|
686
|
+
*
|
687
|
+
* As the TsmRoutine interface is block based, a block needs to be passed
|
688
|
+
* to NextSampleBlock(). If that's not appropriate for an AM, it
|
689
|
+
* internally needs to perform mapping between the internal and a block
|
690
|
+
* based representation.
|
691
|
+
*
|
692
|
+
* Note that it's not acceptable to hold deadlock prone resources such as
|
693
|
+
* lwlocks until scan_sample_next_tuple() has exhausted the tuples on the
|
694
|
+
* block - the tuple is likely to be returned to an upper query node, and
|
695
|
+
* the next call could be off a long while. Holding buffer pins and such
|
696
|
+
* is obviously OK.
|
697
|
+
*
|
698
|
+
* Currently it is required to implement this interface, as there's no
|
699
|
+
* alternative way (contrary e.g. to bitmap scans) to implement sample
|
700
|
+
* scans. If infeasible to implement, the AM may raise an error.
|
701
|
+
*/
|
702
|
+
bool (*scan_sample_next_block) (TableScanDesc scan,
|
703
|
+
struct SampleScanState *scanstate);
|
704
|
+
|
705
|
+
/*
|
706
|
+
* This callback, only called after scan_sample_next_block has returned
|
707
|
+
* true, should determine the next tuple to be returned from the selected
|
708
|
+
* block using the TsmRoutine's NextSampleTuple() callback.
|
709
|
+
*
|
710
|
+
* The callback needs to perform visibility checks, and only return
|
711
|
+
* visible tuples. That obviously can mean calling NextSampleTuple()
|
712
|
+
* multiple times.
|
713
|
+
*
|
714
|
+
* The TsmRoutine interface assumes that there's a maximum offset on a
|
715
|
+
* given page, so if that doesn't apply to an AM, it needs to emulate that
|
716
|
+
* assumption somehow.
|
717
|
+
*/
|
718
|
+
bool (*scan_sample_next_tuple) (TableScanDesc scan,
|
719
|
+
struct SampleScanState *scanstate,
|
720
|
+
TupleTableSlot *slot);
|
721
|
+
|
722
|
+
} TableAmRoutine;
|
723
|
+
|
724
|
+
|
725
|
+
/* ----------------------------------------------------------------------------
|
726
|
+
* Slot functions.
|
727
|
+
* ----------------------------------------------------------------------------
|
728
|
+
*/
|
729
|
+
|
730
|
+
/*
|
731
|
+
* Returns slot callbacks suitable for holding tuples of the appropriate type
|
732
|
+
* for the relation. Works for tables, views, foreign tables and partitioned
|
733
|
+
* tables.
|
734
|
+
*/
|
735
|
+
extern const TupleTableSlotOps *table_slot_callbacks(Relation rel);
|
736
|
+
|
737
|
+
/*
|
738
|
+
* Returns slot using the callbacks returned by table_slot_callbacks(), and
|
739
|
+
* registers it on *reglist.
|
740
|
+
*/
|
741
|
+
extern TupleTableSlot *table_slot_create(Relation rel, List **reglist);
|
742
|
+
|
743
|
+
|
744
|
+
/* ----------------------------------------------------------------------------
|
745
|
+
* Table scan functions.
|
746
|
+
* ----------------------------------------------------------------------------
|
747
|
+
*/
|
748
|
+
|
749
|
+
/*
|
750
|
+
* Start a scan of `rel`. Returned tuples pass a visibility test of
|
751
|
+
* `snapshot`, and if nkeys != 0, the results are filtered by those scan keys.
|
752
|
+
*/
|
753
|
+
static inline TableScanDesc
|
754
|
+
table_beginscan(Relation rel, Snapshot snapshot,
|
755
|
+
int nkeys, struct ScanKeyData *key)
|
756
|
+
{
|
757
|
+
uint32 flags = SO_TYPE_SEQSCAN |
|
758
|
+
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
|
759
|
+
|
760
|
+
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
|
761
|
+
}
|
762
|
+
|
763
|
+
/*
|
764
|
+
* Like table_beginscan(), but for scanning catalog. It'll automatically use a
|
765
|
+
* snapshot appropriate for scanning catalog relations.
|
766
|
+
*/
|
767
|
+
extern TableScanDesc table_beginscan_catalog(Relation rel, int nkeys,
|
768
|
+
struct ScanKeyData *key);
|
769
|
+
|
770
|
+
/*
|
771
|
+
* Like table_beginscan(), but table_beginscan_strat() offers an extended API
|
772
|
+
* that lets the caller control whether a nondefault buffer access strategy
|
773
|
+
* can be used, and whether syncscan can be chosen (possibly resulting in the
|
774
|
+
* scan not starting from block zero). Both of these default to true with
|
775
|
+
* plain table_beginscan.
|
776
|
+
*/
|
777
|
+
static inline TableScanDesc
|
778
|
+
table_beginscan_strat(Relation rel, Snapshot snapshot,
|
779
|
+
int nkeys, struct ScanKeyData *key,
|
780
|
+
bool allow_strat, bool allow_sync)
|
781
|
+
{
|
782
|
+
uint32 flags = SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE;
|
783
|
+
|
784
|
+
if (allow_strat)
|
785
|
+
flags |= SO_ALLOW_STRAT;
|
786
|
+
if (allow_sync)
|
787
|
+
flags |= SO_ALLOW_SYNC;
|
788
|
+
|
789
|
+
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
|
790
|
+
}
|
791
|
+
|
792
|
+
/*
|
793
|
+
* table_beginscan_bm is an alternative entry point for setting up a
|
794
|
+
* TableScanDesc for a bitmap heap scan. Although that scan technology is
|
795
|
+
* really quite unlike a standard seqscan, there is just enough commonality to
|
796
|
+
* make it worth using the same data structure.
|
797
|
+
*/
|
798
|
+
static inline TableScanDesc
|
799
|
+
table_beginscan_bm(Relation rel, Snapshot snapshot,
|
800
|
+
int nkeys, struct ScanKeyData *key)
|
801
|
+
{
|
802
|
+
uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;
|
803
|
+
|
804
|
+
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
|
805
|
+
}
|
806
|
+
|
807
|
+
/*
|
808
|
+
* table_beginscan_sampling is an alternative entry point for setting up a
|
809
|
+
* TableScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth
|
810
|
+
* using the same data structure although the behavior is rather different.
|
811
|
+
* In addition to the options offered by table_beginscan_strat, this call
|
812
|
+
* also allows control of whether page-mode visibility checking is used.
|
813
|
+
*/
|
814
|
+
static inline TableScanDesc
|
815
|
+
table_beginscan_sampling(Relation rel, Snapshot snapshot,
|
816
|
+
int nkeys, struct ScanKeyData *key,
|
817
|
+
bool allow_strat, bool allow_sync,
|
818
|
+
bool allow_pagemode)
|
819
|
+
{
|
820
|
+
uint32 flags = SO_TYPE_SAMPLESCAN;
|
821
|
+
|
822
|
+
if (allow_strat)
|
823
|
+
flags |= SO_ALLOW_STRAT;
|
824
|
+
if (allow_sync)
|
825
|
+
flags |= SO_ALLOW_SYNC;
|
826
|
+
if (allow_pagemode)
|
827
|
+
flags |= SO_ALLOW_PAGEMODE;
|
828
|
+
|
829
|
+
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
|
830
|
+
}
|
831
|
+
|
832
|
+
/*
|
833
|
+
* table_beginscan_tid is an alternative entry point for setting up a
|
834
|
+
* TableScanDesc for a Tid scan. As with bitmap scans, it's worth using
|
835
|
+
* the same data structure although the behavior is rather different.
|
836
|
+
*/
|
837
|
+
static inline TableScanDesc
|
838
|
+
table_beginscan_tid(Relation rel, Snapshot snapshot)
|
839
|
+
{
|
840
|
+
uint32 flags = SO_TYPE_TIDSCAN;
|
841
|
+
|
842
|
+
return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);
|
843
|
+
}
|
844
|
+
|
845
|
+
/*
|
846
|
+
* table_beginscan_analyze is an alternative entry point for setting up a
|
847
|
+
* TableScanDesc for an ANALYZE scan. As with bitmap scans, it's worth using
|
848
|
+
* the same data structure although the behavior is rather different.
|
849
|
+
*/
|
850
|
+
static inline TableScanDesc
|
851
|
+
table_beginscan_analyze(Relation rel)
|
852
|
+
{
|
853
|
+
uint32 flags = SO_TYPE_ANALYZE;
|
854
|
+
|
855
|
+
return rel->rd_tableam->scan_begin(rel, NULL, 0, NULL, NULL, flags);
|
856
|
+
}
|
857
|
+
|
858
|
+
/*
|
859
|
+
* End relation scan.
|
860
|
+
*/
|
861
|
+
static inline void
|
862
|
+
table_endscan(TableScanDesc scan)
|
863
|
+
{
|
864
|
+
scan->rs_rd->rd_tableam->scan_end(scan);
|
865
|
+
}
|
866
|
+
|
867
|
+
/*
|
868
|
+
* Restart a relation scan.
|
869
|
+
*/
|
870
|
+
static inline void
|
871
|
+
table_rescan(TableScanDesc scan,
|
872
|
+
struct ScanKeyData *key)
|
873
|
+
{
|
874
|
+
scan->rs_rd->rd_tableam->scan_rescan(scan, key, false, false, false, false);
|
875
|
+
}
|
876
|
+
|
877
|
+
/*
|
878
|
+
* Restart a relation scan after changing params.
|
879
|
+
*
|
880
|
+
* This call allows changing the buffer strategy, syncscan, and pagemode
|
881
|
+
* options before starting a fresh scan. Note that although the actual use of
|
882
|
+
* syncscan might change (effectively, enabling or disabling reporting), the
|
883
|
+
* previously selected startblock will be kept.
|
884
|
+
*/
|
885
|
+
static inline void
|
886
|
+
table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key,
|
887
|
+
bool allow_strat, bool allow_sync, bool allow_pagemode)
|
888
|
+
{
|
889
|
+
scan->rs_rd->rd_tableam->scan_rescan(scan, key, true,
|
890
|
+
allow_strat, allow_sync,
|
891
|
+
allow_pagemode);
|
892
|
+
}
|
893
|
+
|
894
|
+
/*
|
895
|
+
* Update snapshot used by the scan.
|
896
|
+
*/
|
897
|
+
extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot);
|
898
|
+
|
899
|
+
/*
|
900
|
+
* Return next tuple from `scan`, store in slot.
|
901
|
+
*/
|
902
|
+
static inline bool
|
903
|
+
table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
|
904
|
+
{
|
905
|
+
slot->tts_tableOid = RelationGetRelid(sscan->rs_rd);
|
906
|
+
return sscan->rs_rd->rd_tableam->scan_getnextslot(sscan, direction, slot);
|
907
|
+
}
|
908
|
+
|
909
|
+
|
910
|
+
/* ----------------------------------------------------------------------------
|
911
|
+
* Parallel table scan related functions.
|
912
|
+
* ----------------------------------------------------------------------------
|
913
|
+
*/
|
914
|
+
|
915
|
+
/*
|
916
|
+
* Estimate the size of shared memory needed for a parallel scan of this
|
917
|
+
* relation.
|
918
|
+
*/
|
919
|
+
extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);
|
920
|
+
|
921
|
+
/*
|
922
|
+
* Initialize ParallelTableScanDesc for a parallel scan of this
|
923
|
+
* relation. `pscan` needs to be sized according to parallelscan_estimate()
|
924
|
+
* for the same relation. Call this just once in the leader process; then,
|
925
|
+
* individual workers attach via table_beginscan_parallel.
|
926
|
+
*/
|
927
|
+
extern void table_parallelscan_initialize(Relation rel,
|
928
|
+
ParallelTableScanDesc pscan,
|
929
|
+
Snapshot snapshot);
|
930
|
+
|
931
|
+
/*
|
932
|
+
* Begin a parallel scan. `pscan` needs to have been initialized with
|
933
|
+
* table_parallelscan_initialize(), for the same relation. The initialization
|
934
|
+
* does not need to have happened in this backend.
|
935
|
+
*
|
936
|
+
* Caller must hold a suitable lock on the relation.
|
937
|
+
*/
|
938
|
+
extern TableScanDesc table_beginscan_parallel(Relation rel,
|
939
|
+
ParallelTableScanDesc pscan);
|
940
|
+
|
941
|
+
/*
|
942
|
+
* Restart a parallel scan. Call this in the leader process. Caller is
|
943
|
+
* responsible for making sure that all workers have finished the scan
|
944
|
+
* beforehand.
|
945
|
+
*/
|
946
|
+
static inline void
|
947
|
+
table_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
|
948
|
+
{
|
949
|
+
rel->rd_tableam->parallelscan_reinitialize(rel, pscan);
|
950
|
+
}
|
951
|
+
|
952
|
+
|
953
|
+
/* ----------------------------------------------------------------------------
|
954
|
+
* Index scan related functions.
|
955
|
+
* ----------------------------------------------------------------------------
|
956
|
+
*/
|
957
|
+
|
958
|
+
/*
|
959
|
+
* Prepare to fetch tuples from the relation, as needed when fetching tuples
|
960
|
+
* for an index scan.
|
961
|
+
*
|
962
|
+
* Tuples for an index scan can then be fetched via table_index_fetch_tuple().
|
963
|
+
*/
|
964
|
+
static inline IndexFetchTableData *
|
965
|
+
table_index_fetch_begin(Relation rel)
|
966
|
+
{
|
967
|
+
return rel->rd_tableam->index_fetch_begin(rel);
|
968
|
+
}
|
969
|
+
|
970
|
+
/*
|
971
|
+
* Reset index fetch. Typically this will release cross index fetch resources
|
972
|
+
* held in IndexFetchTableData.
|
973
|
+
*/
|
974
|
+
static inline void
|
975
|
+
table_index_fetch_reset(struct IndexFetchTableData *scan)
|
976
|
+
{
|
977
|
+
scan->rel->rd_tableam->index_fetch_reset(scan);
|
978
|
+
}
|
979
|
+
|
980
|
+
/*
|
981
|
+
* Release resources and deallocate index fetch.
|
982
|
+
*/
|
983
|
+
static inline void
|
984
|
+
table_index_fetch_end(struct IndexFetchTableData *scan)
|
985
|
+
{
|
986
|
+
scan->rel->rd_tableam->index_fetch_end(scan);
|
987
|
+
}
|
988
|
+
|
989
|
+
/*
|
990
|
+
* Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing
|
991
|
+
* a visibility test according to `snapshot`. If a tuple was found and passed
|
992
|
+
* the visibility test, returns true, false otherwise. Note that *tid may be
|
993
|
+
* modified when we return true (see later remarks on multiple row versions
|
994
|
+
* reachable via a single index entry).
|
995
|
+
*
|
996
|
+
* *call_again needs to be false on the first call to table_index_fetch_tuple() for
|
997
|
+
* a tid. If there potentially is another tuple matching the tid, *call_again
|
998
|
+
* will be set to true, signaling that table_index_fetch_tuple() should be called
|
999
|
+
* again for the same tid.
|
1000
|
+
*
|
1001
|
+
* *all_dead, if all_dead is not NULL, will be set to true by
|
1002
|
+
* table_index_fetch_tuple() iff it is guaranteed that no backend needs to see
|
1003
|
+
* that tuple. Index AMs can use that to avoid returning that tid in future
|
1004
|
+
* searches.
|
1005
|
+
*
|
1006
|
+
* The difference between this function and table_tuple_fetch_row_version()
|
1007
|
+
* is that this function returns the currently visible version of a row if
|
1008
|
+
* the AM supports storing multiple row versions reachable via a single index
|
1009
|
+
* entry (like heap's HOT). Whereas table_tuple_fetch_row_version() only
|
1010
|
+
* evaluates the tuple exactly at `tid`. Outside of index entry ->table tuple
|
1011
|
+
* lookups, table_tuple_fetch_row_version() is what's usually needed.
|
1012
|
+
*/
|
1013
|
+
static inline bool
|
1014
|
+
table_index_fetch_tuple(struct IndexFetchTableData *scan,
|
1015
|
+
ItemPointer tid,
|
1016
|
+
Snapshot snapshot,
|
1017
|
+
TupleTableSlot *slot,
|
1018
|
+
bool *call_again, bool *all_dead)
|
1019
|
+
{
|
1020
|
+
|
1021
|
+
return scan->rel->rd_tableam->index_fetch_tuple(scan, tid, snapshot,
|
1022
|
+
slot, call_again,
|
1023
|
+
all_dead);
|
1024
|
+
}
|
1025
|
+
|
1026
|
+
/*
|
1027
|
+
* This is a convenience wrapper around table_index_fetch_tuple() which
|
1028
|
+
* returns whether there are table tuple items corresponding to an index
|
1029
|
+
* entry. This likely is only useful to verify if there's a conflict in a
|
1030
|
+
* unique index.
|
1031
|
+
*/
|
1032
|
+
extern bool table_index_fetch_tuple_check(Relation rel,
|
1033
|
+
ItemPointer tid,
|
1034
|
+
Snapshot snapshot,
|
1035
|
+
bool *all_dead);
|
1036
|
+
|
1037
|
+
|
1038
|
+
/* ------------------------------------------------------------------------
|
1039
|
+
* Functions for non-modifying operations on individual tuples
|
1040
|
+
* ------------------------------------------------------------------------
|
1041
|
+
*/
|
1042
|
+
|
1043
|
+
|
1044
|
+
/*
|
1045
|
+
* Fetch tuple at `tid` into `slot`, after doing a visibility test according to
|
1046
|
+
* `snapshot`. If a tuple was found and passed the visibility test, returns
|
1047
|
+
* true, false otherwise.
|
1048
|
+
*
|
1049
|
+
* See table_index_fetch_tuple's comment about what the difference between
|
1050
|
+
* these functions is. It is correct to use this function outside of index
|
1051
|
+
* entry->table tuple lookups.
|
1052
|
+
*/
|
1053
|
+
static inline bool
|
1054
|
+
table_tuple_fetch_row_version(Relation rel,
|
1055
|
+
ItemPointer tid,
|
1056
|
+
Snapshot snapshot,
|
1057
|
+
TupleTableSlot *slot)
|
1058
|
+
{
|
1059
|
+
return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
|
1060
|
+
}
|
1061
|
+
|
1062
|
+
/*
|
1063
|
+
* Verify that `tid` is a potentially valid tuple identifier. That doesn't
|
1064
|
+
* mean that the pointed to row needs to exist or be visible, but that
|
1065
|
+
* attempting to fetch the row (e.g. with table_tuple_get_latest_tid() or
|
1066
|
+
* table_tuple_fetch_row_version()) should not error out if called with that
|
1067
|
+
* tid.
|
1068
|
+
*
|
1069
|
+
* `scan` needs to have been started via table_beginscan().
|
1070
|
+
*/
|
1071
|
+
static inline bool
|
1072
|
+
table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
|
1073
|
+
{
|
1074
|
+
return scan->rs_rd->rd_tableam->tuple_tid_valid(scan, tid);
|
1075
|
+
}
|
1076
|
+
|
1077
|
+
/*
|
1078
|
+
* Return the latest version of the tuple at `tid`, by updating `tid` to
|
1079
|
+
* point at the newest version.
|
1080
|
+
*/
|
1081
|
+
extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid);
|
1082
|
+
|
1083
|
+
/*
|
1084
|
+
* Return true iff tuple in slot satisfies the snapshot.
|
1085
|
+
*
|
1086
|
+
* This assumes the slot's tuple is valid, and of the appropriate type for the
|
1087
|
+
* AM.
|
1088
|
+
*
|
1089
|
+
* Some AMs might modify the data underlying the tuple as a side-effect. If so
|
1090
|
+
* they ought to mark the relevant buffer dirty.
|
1091
|
+
*/
|
1092
|
+
static inline bool
|
1093
|
+
table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
|
1094
|
+
Snapshot snapshot)
|
1095
|
+
{
|
1096
|
+
return rel->rd_tableam->tuple_satisfies_snapshot(rel, slot, snapshot);
|
1097
|
+
}
|
1098
|
+
|
1099
|
+
/*
|
1100
|
+
* Compute the newest xid among the tuples pointed to by items. This is used
|
1101
|
+
* to compute what snapshots to conflict with when replaying WAL records for
|
1102
|
+
* page-level index vacuums.
|
1103
|
+
*/
|
1104
|
+
static inline TransactionId
|
1105
|
+
table_compute_xid_horizon_for_tuples(Relation rel,
|
1106
|
+
ItemPointerData *items,
|
1107
|
+
int nitems)
|
1108
|
+
{
|
1109
|
+
return rel->rd_tableam->compute_xid_horizon_for_tuples(rel, items, nitems);
|
1110
|
+
}
|
1111
|
+
|
1112
|
+
|
1113
|
+
/* ----------------------------------------------------------------------------
|
1114
|
+
* Functions for manipulations of physical tuples.
|
1115
|
+
* ----------------------------------------------------------------------------
|
1116
|
+
*/
|
1117
|
+
|
1118
|
+
/*
|
1119
|
+
* Insert a tuple from a slot into table AM routine.
|
1120
|
+
*
|
1121
|
+
* The options bitmask allows the caller to specify options that may change the
|
1122
|
+
* behaviour of the AM. The AM will ignore options that it does not support.
|
1123
|
+
*
|
1124
|
+
* If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not reuse
|
1125
|
+
* free space in the relation. This can save some cycles when we know the
|
1126
|
+
* relation is new and doesn't contain useful amounts of free space.
|
1127
|
+
* TABLE_INSERT_SKIP_FSM is commonly passed directly to
|
1128
|
+
* RelationGetBufferForTuple. See that method for more information.
|
1129
|
+
*
|
1130
|
+
* TABLE_INSERT_FROZEN should only be specified for inserts into
|
1131
|
+
* relfilenodes created during the current subtransaction and when
|
1132
|
+
* there are no prior snapshots or pre-existing portals open.
|
1133
|
+
* This causes rows to be frozen, which is an MVCC violation and
|
1134
|
+
* requires explicit options chosen by user.
|
1135
|
+
*
|
1136
|
+
* TABLE_INSERT_NO_LOGICAL force-disables the emitting of logical decoding
|
1137
|
+
* information for the tuple. This should solely be used during table rewrites
|
1138
|
+
* where RelationIsLogicallyLogged(relation) is not yet accurate for the new
|
1139
|
+
* relation.
|
1140
|
+
*
|
1141
|
+
* Note that most of these options will be applied when inserting into the
|
1142
|
+
* heap's TOAST table, too, if the tuple requires any out-of-line data.
|
1143
|
+
*
|
1144
|
+
* The BulkInsertState object (if any; bistate can be NULL for default
|
1145
|
+
* behavior) is also just passed through to RelationGetBufferForTuple. If
|
1146
|
+
* `bistate` is provided, table_finish_bulk_insert() needs to be called.
|
1147
|
+
*
|
1148
|
+
* On return the slot's tts_tid and tts_tableOid are updated to reflect the
|
1149
|
+
* insertion. But note that any toasting of fields within the slot is NOT
|
1150
|
+
* reflected in the slots contents.
|
1151
|
+
*/
|
1152
|
+
static inline void
|
1153
|
+
table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
|
1154
|
+
int options, struct BulkInsertStateData *bistate)
|
1155
|
+
{
|
1156
|
+
rel->rd_tableam->tuple_insert(rel, slot, cid, options,
|
1157
|
+
bistate);
|
1158
|
+
}
|
1159
|
+
|
1160
|
+
/*
|
1161
|
+
* Perform a "speculative insertion". These can be backed out afterwards
|
1162
|
+
* without aborting the whole transaction. Other sessions can wait for the
|
1163
|
+
* speculative insertion to be confirmed, turning it into a regular tuple, or
|
1164
|
+
* aborted, as if it never existed. Speculatively inserted tuples behave as
|
1165
|
+
* "value locks" of short duration, used to implement INSERT .. ON CONFLICT.
|
1166
|
+
*
|
1167
|
+
* A transaction having performed a speculative insertion has to either abort,
|
1168
|
+
* or finish the speculative insertion with
|
1169
|
+
* table_tuple_complete_speculative(succeeded = ...).
|
1170
|
+
*/
|
1171
|
+
static inline void
|
1172
|
+
table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot,
|
1173
|
+
CommandId cid, int options,
|
1174
|
+
struct BulkInsertStateData *bistate,
|
1175
|
+
uint32 specToken)
|
1176
|
+
{
|
1177
|
+
rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options,
|
1178
|
+
bistate, specToken);
|
1179
|
+
}
|
1180
|
+
|
1181
|
+
/*
|
1182
|
+
* Complete "speculative insertion" started in the same transaction. If
|
1183
|
+
* succeeded is true, the tuple is fully inserted, if false, it's removed.
|
1184
|
+
*/
|
1185
|
+
static inline void
|
1186
|
+
table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot,
|
1187
|
+
uint32 specToken, bool succeeded)
|
1188
|
+
{
|
1189
|
+
rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken,
|
1190
|
+
succeeded);
|
1191
|
+
}
|
1192
|
+
|
1193
|
+
/*
|
1194
|
+
* Insert multiple tuples into a table.
|
1195
|
+
*
|
1196
|
+
* This is like table_tuple_insert(), but inserts multiple tuples in one
|
1197
|
+
* operation. That's often faster than calling table_tuple_insert() in a loop,
|
1198
|
+
* because e.g. the AM can reduce WAL logging and page locking overhead.
|
1199
|
+
*
|
1200
|
+
* Except for taking `nslots` tuples as input, and an array of TupleTableSlots
|
1201
|
+
* in `slots`, the parameters for table_multi_insert() are the same as for
|
1202
|
+
* table_tuple_insert().
|
1203
|
+
*
|
1204
|
+
* Note: this leaks memory into the current memory context. You can create a
|
1205
|
+
* temporary context before calling this, if that's a problem.
|
1206
|
+
*/
|
1207
|
+
static inline void
|
1208
|
+
table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
|
1209
|
+
CommandId cid, int options, struct BulkInsertStateData *bistate)
|
1210
|
+
{
|
1211
|
+
rel->rd_tableam->multi_insert(rel, slots, nslots,
|
1212
|
+
cid, options, bistate);
|
1213
|
+
}
|
1214
|
+
|
1215
|
+
/*
|
1216
|
+
* Delete a tuple.
|
1217
|
+
*
|
1218
|
+
* NB: do not call this directly unless prepared to deal with
|
1219
|
+
* concurrent-update conditions. Use simple_table_tuple_delete instead.
|
1220
|
+
*
|
1221
|
+
* Input parameters:
|
1222
|
+
* relation - table to be modified (caller must hold suitable lock)
|
1223
|
+
* tid - TID of tuple to be deleted
|
1224
|
+
* cid - delete command ID (used for visibility test, and stored into
|
1225
|
+
* cmax if successful)
|
1226
|
+
* crosscheck - if not InvalidSnapshot, also check tuple against this
|
1227
|
+
* wait - true if should wait for any conflicting update to commit/abort
|
1228
|
+
* Output parameters:
|
1229
|
+
* tmfd - filled in failure cases (see below)
|
1230
|
+
* changingPart - true iff the tuple is being moved to another partition
|
1231
|
+
* table due to an update of the partition key. Otherwise, false.
|
1232
|
+
*
|
1233
|
+
* Normal, successful return value is TM_Ok, which means we did actually
|
1234
|
+
* delete it. Failure return codes are TM_SelfModified, TM_Updated, and
|
1235
|
+
* TM_BeingModified (the last only possible if wait == false).
|
1236
|
+
*
|
1237
|
+
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
|
1238
|
+
* t_xmax, and, if possible, t_cmax. See comments for
|
1239
|
+
* struct TM_FailureData for additional info.
|
1240
|
+
*/
|
1241
|
+
static inline TM_Result
|
1242
|
+
table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
|
1243
|
+
Snapshot snapshot, Snapshot crosscheck, bool wait,
|
1244
|
+
TM_FailureData *tmfd, bool changingPart)
|
1245
|
+
{
|
1246
|
+
return rel->rd_tableam->tuple_delete(rel, tid, cid,
|
1247
|
+
snapshot, crosscheck,
|
1248
|
+
wait, tmfd, changingPart);
|
1249
|
+
}
|
1250
|
+
|
1251
|
+
/*
|
1252
|
+
* Update a tuple.
|
1253
|
+
*
|
1254
|
+
* NB: do not call this directly unless you are prepared to deal with
|
1255
|
+
* concurrent-update conditions. Use simple_table_tuple_update instead.
|
1256
|
+
*
|
1257
|
+
* Input parameters:
|
1258
|
+
* relation - table to be modified (caller must hold suitable lock)
|
1259
|
+
* otid - TID of old tuple to be replaced
|
1260
|
+
* slot - newly constructed tuple data to store
|
1261
|
+
* cid - update command ID (used for visibility test, and stored into
|
1262
|
+
* cmax/cmin if successful)
|
1263
|
+
* crosscheck - if not InvalidSnapshot, also check old tuple against this
|
1264
|
+
* wait - true if should wait for any conflicting update to commit/abort
|
1265
|
+
* Output parameters:
|
1266
|
+
* tmfd - filled in failure cases (see below)
|
1267
|
+
* lockmode - filled with lock mode acquired on tuple
|
1268
|
+
* update_indexes - in success cases this is set to true if new index entries
|
1269
|
+
* are required for this tuple
|
1270
|
+
*
|
1271
|
+
* Normal, successful return value is TM_Ok, which means we did actually
|
1272
|
+
* update it. Failure return codes are TM_SelfModified, TM_Updated, and
|
1273
|
+
* TM_BeingModified (the last only possible if wait == false).
|
1274
|
+
*
|
1275
|
+
* On success, the slot's tts_tid and tts_tableOid are updated to match the new
|
1276
|
+
* stored tuple; in particular, slot->tts_tid is set to the TID where the
|
1277
|
+
* new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
|
1278
|
+
* update was done. However, any TOAST changes in the new tuple's
|
1279
|
+
* data are not reflected into *newtup.
|
1280
|
+
*
|
1281
|
+
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
|
1282
|
+
* t_xmax, and, if possible, t_cmax. See comments for struct TM_FailureData
|
1283
|
+
* for additional info.
|
1284
|
+
*/
|
1285
|
+
static inline TM_Result
|
1286
|
+
table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
|
1287
|
+
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
|
1288
|
+
bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
|
1289
|
+
bool *update_indexes)
|
1290
|
+
{
|
1291
|
+
return rel->rd_tableam->tuple_update(rel, otid, slot,
|
1292
|
+
cid, snapshot, crosscheck,
|
1293
|
+
wait, tmfd,
|
1294
|
+
lockmode, update_indexes);
|
1295
|
+
}
|
1296
|
+
|
1297
|
+
/*
|
1298
|
+
* Lock a tuple in the specified mode.
|
1299
|
+
*
|
1300
|
+
* Input parameters:
|
1301
|
+
* relation: relation containing tuple (caller must hold suitable lock)
|
1302
|
+
* tid: TID of tuple to lock
|
1303
|
+
* snapshot: snapshot to use for visibility determinations
|
1304
|
+
* cid: current command ID (used for visibility test, and stored into
|
1305
|
+
* tuple's cmax if lock is successful)
|
1306
|
+
* mode: lock mode desired
|
1307
|
+
* wait_policy: what to do if tuple lock is not available
|
1308
|
+
* flags:
|
1309
|
+
* If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to
|
1310
|
+
* also lock descendant tuples if lock modes don't conflict.
|
1311
|
+
* If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock
|
1312
|
+
* latest version.
|
1313
|
+
*
|
1314
|
+
* Output parameters:
|
1315
|
+
* *slot: contains the target tuple
|
1316
|
+
* *tmfd: filled in failure cases (see below)
|
1317
|
+
*
|
1318
|
+
* Function result may be:
|
1319
|
+
* TM_Ok: lock was successfully acquired
|
1320
|
+
* TM_Invisible: lock failed because tuple was never visible to us
|
1321
|
+
* TM_SelfModified: lock failed because tuple updated by self
|
1322
|
+
* TM_Updated: lock failed because tuple updated by other xact
|
1323
|
+
* TM_Deleted: lock failed because tuple deleted by other xact
|
1324
|
+
* TM_WouldBlock: lock couldn't be acquired and wait_policy is skip
|
1325
|
+
*
|
1326
|
+
* In the failure cases other than TM_Invisible and TM_Deleted, the routine
|
1327
|
+
* fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See
|
1328
|
+
* comments for struct TM_FailureData for additional info.
|
1329
|
+
*/
|
1330
|
+
static inline TM_Result
|
1331
|
+
table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot,
|
1332
|
+
TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
|
1333
|
+
LockWaitPolicy wait_policy, uint8 flags,
|
1334
|
+
TM_FailureData *tmfd)
|
1335
|
+
{
|
1336
|
+
return rel->rd_tableam->tuple_lock(rel, tid, snapshot, slot,
|
1337
|
+
cid, mode, wait_policy,
|
1338
|
+
flags, tmfd);
|
1339
|
+
}
|
1340
|
+
|
1341
|
+
/*
|
1342
|
+
* Perform operations necessary to complete insertions made via
|
1343
|
+
* tuple_insert and multi_insert with a BulkInsertState specified.
|
1344
|
+
*/
|
1345
|
+
static inline void
|
1346
|
+
table_finish_bulk_insert(Relation rel, int options)
|
1347
|
+
{
|
1348
|
+
/* optional callback */
|
1349
|
+
if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert)
|
1350
|
+
rel->rd_tableam->finish_bulk_insert(rel, options);
|
1351
|
+
}
|
1352
|
+
|
1353
|
+
|
1354
|
+
/* ------------------------------------------------------------------------
|
1355
|
+
* DDL related functionality.
|
1356
|
+
* ------------------------------------------------------------------------
|
1357
|
+
*/
|
1358
|
+
|
1359
|
+
/*
|
1360
|
+
* Create storage for `rel` in `newrnode`, with persistence set to
|
1361
|
+
* `persistence`.
|
1362
|
+
*
|
1363
|
+
* This is used both during relation creation and various DDL operations to
|
1364
|
+
* create a new relfilenode that can be filled from scratch. When creating
|
1365
|
+
* new storage for an existing relfilenode, this should be called before the
|
1366
|
+
* relcache entry has been updated.
|
1367
|
+
*
|
1368
|
+
* *freezeXid, *minmulti are set to the xid / multixact horizon for the table
|
1369
|
+
* that pg_class.{relfrozenxid, relminmxid} have to be set to.
|
1370
|
+
*/
|
1371
|
+
static inline void
|
1372
|
+
table_relation_set_new_filenode(Relation rel,
|
1373
|
+
const RelFileNode *newrnode,
|
1374
|
+
char persistence,
|
1375
|
+
TransactionId *freezeXid,
|
1376
|
+
MultiXactId *minmulti)
|
1377
|
+
{
|
1378
|
+
rel->rd_tableam->relation_set_new_filenode(rel, newrnode, persistence,
|
1379
|
+
freezeXid, minmulti);
|
1380
|
+
}
|
1381
|
+
|
1382
|
+
/*
|
1383
|
+
* Remove all table contents from `rel`, in a non-transactional manner.
|
1384
|
+
* Non-transactional meaning that there's no need to support rollbacks. This
|
1385
|
+
* commonly only is used to perform truncations for relfilenodes created in the
|
1386
|
+
* current transaction.
|
1387
|
+
*/
|
1388
|
+
static inline void
|
1389
|
+
table_relation_nontransactional_truncate(Relation rel)
|
1390
|
+
{
|
1391
|
+
rel->rd_tableam->relation_nontransactional_truncate(rel);
|
1392
|
+
}
|
1393
|
+
|
1394
|
+
/*
|
1395
|
+
* Copy data from `rel` into the new relfilenode `newrnode`. The new
|
1396
|
+
* relfilenode may not have storage associated before this function is
|
1397
|
+
* called. This is only supposed to be used for low level operations like
|
1398
|
+
* changing a relation's tablespace.
|
1399
|
+
*/
|
1400
|
+
static inline void
|
1401
|
+
table_relation_copy_data(Relation rel, const RelFileNode *newrnode)
|
1402
|
+
{
|
1403
|
+
rel->rd_tableam->relation_copy_data(rel, newrnode);
|
1404
|
+
}
|
1405
|
+
|
1406
|
+
/*
|
1407
|
+
* Copy data from `OldTable` into `NewTable`, as part of a CLUSTER or VACUUM
|
1408
|
+
* FULL.
|
1409
|
+
*
|
1410
|
+
* Additional Input parameters:
|
1411
|
+
* - use_sort - if true, the table contents are sorted appropriate for
|
1412
|
+
* `OldIndex`; if false and OldIndex is not InvalidOid, the data is copied
|
1413
|
+
* in that index's order; if false and OldIndex is InvalidOid, no sorting is
|
1414
|
+
* performed
|
1415
|
+
* - OldIndex - see use_sort
|
1416
|
+
* - OldestXmin - computed by vacuum_set_xid_limits(), even when
|
1417
|
+
* not needed for the relation's AM
|
1418
|
+
* - *xid_cutoff - ditto
|
1419
|
+
* - *multi_cutoff - ditto
|
1420
|
+
*
|
1421
|
+
* Output parameters:
|
1422
|
+
* - *xid_cutoff - rel's new relfrozenxid value, may be invalid
|
1423
|
+
* - *multi_cutoff - rel's new relminmxid value, may be invalid
|
1424
|
+
* - *tups_vacuumed - stats, for logging, if appropriate for AM
|
1425
|
+
* - *tups_recently_dead - stats, for logging, if appropriate for AM
|
1426
|
+
*/
|
1427
|
+
static inline void
|
1428
|
+
table_relation_copy_for_cluster(Relation OldTable, Relation NewTable,
|
1429
|
+
Relation OldIndex,
|
1430
|
+
bool use_sort,
|
1431
|
+
TransactionId OldestXmin,
|
1432
|
+
TransactionId *xid_cutoff,
|
1433
|
+
MultiXactId *multi_cutoff,
|
1434
|
+
double *num_tuples,
|
1435
|
+
double *tups_vacuumed,
|
1436
|
+
double *tups_recently_dead)
|
1437
|
+
{
|
1438
|
+
OldTable->rd_tableam->relation_copy_for_cluster(OldTable, NewTable, OldIndex,
|
1439
|
+
use_sort, OldestXmin,
|
1440
|
+
xid_cutoff, multi_cutoff,
|
1441
|
+
num_tuples, tups_vacuumed,
|
1442
|
+
tups_recently_dead);
|
1443
|
+
}
|
1444
|
+
|
1445
|
+
/*
|
1446
|
+
* Perform VACUUM on the relation. The VACUUM can be triggered by a user or by
|
1447
|
+
* autovacuum. The specific actions performed by the AM will depend heavily on
|
1448
|
+
* the individual AM.
|
1449
|
+
*
|
1450
|
+
* On entry a transaction needs to already have been established, and the
|
1451
|
+
* table is locked with a ShareUpdateExclusive lock.
|
1452
|
+
*
|
1453
|
+
* Note that neither VACUUM FULL (and CLUSTER), nor ANALYZE go through this
|
1454
|
+
* routine, even if (for ANALYZE) it is part of the same VACUUM command.
|
1455
|
+
*/
|
1456
|
+
static inline void
|
1457
|
+
table_relation_vacuum(Relation rel, struct VacuumParams *params,
|
1458
|
+
BufferAccessStrategy bstrategy)
|
1459
|
+
{
|
1460
|
+
rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
|
1461
|
+
}
|
1462
|
+
|
1463
|
+
/*
|
1464
|
+
* Prepare to analyze block `blockno` of `scan`. The scan needs to have been
|
1465
|
+
* started with table_beginscan_analyze(). Note that this routine might
|
1466
|
+
* acquire resources like locks that are held until
|
1467
|
+
* table_scan_analyze_next_tuple() returns false.
|
1468
|
+
*
|
1469
|
+
* Returns false if block is unsuitable for sampling, true otherwise.
|
1470
|
+
*/
|
1471
|
+
static inline bool
|
1472
|
+
table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
|
1473
|
+
BufferAccessStrategy bstrategy)
|
1474
|
+
{
|
1475
|
+
return scan->rs_rd->rd_tableam->scan_analyze_next_block(scan, blockno,
|
1476
|
+
bstrategy);
|
1477
|
+
}
|
1478
|
+
|
1479
|
+
/*
|
1480
|
+
* Iterate over tuples in the block selected with
|
1481
|
+
* table_scan_analyze_next_block() (which needs to have returned true, and
|
1482
|
+
* this routine may not have returned false for the same block before). If a
|
1483
|
+
* tuple that's suitable for sampling is found, true is returned and a tuple
|
1484
|
+
* is stored in `slot`.
|
1485
|
+
*
|
1486
|
+
* *liverows and *deadrows are incremented according to the encountered
|
1487
|
+
* tuples.
|
1488
|
+
*/
|
1489
|
+
static inline bool
|
1490
|
+
table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
|
1491
|
+
double *liverows, double *deadrows,
|
1492
|
+
TupleTableSlot *slot)
|
1493
|
+
{
|
1494
|
+
return scan->rs_rd->rd_tableam->scan_analyze_next_tuple(scan, OldestXmin,
|
1495
|
+
liverows, deadrows,
|
1496
|
+
slot);
|
1497
|
+
}
|
1498
|
+
|
1499
|
+
/*
|
1500
|
+
* table_index_build_scan - scan the table to find tuples to be indexed
|
1501
|
+
*
|
1502
|
+
* This is called back from an access-method-specific index build procedure
|
1503
|
+
* after the AM has done whatever setup it needs. The parent table relation
|
1504
|
+
* is scanned to find tuples that should be entered into the index. Each
|
1505
|
+
* such tuple is passed to the AM's callback routine, which does the right
|
1506
|
+
* things to add it to the new index. After we return, the AM's index
|
1507
|
+
* build procedure does whatever cleanup it needs.
|
1508
|
+
*
|
1509
|
+
* The total count of live tuples is returned. This is for updating pg_class
|
1510
|
+
* statistics. (It's annoying not to be able to do that here, but we want to
|
1511
|
+
* merge that update with others; see index_update_stats.) Note that the
|
1512
|
+
* index AM itself must keep track of the number of index tuples; we don't do
|
1513
|
+
* so here because the AM might reject some of the tuples for its own reasons,
|
1514
|
+
* such as being unable to store NULLs.
|
1515
|
+
*
|
1516
|
+
* If 'progress', the PROGRESS_SCAN_BLOCKS_TOTAL counter is updated when
|
1517
|
+
* starting the scan, and PROGRESS_SCAN_BLOCKS_DONE is updated as we go along.
|
1518
|
+
*
|
1519
|
+
* A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect
|
1520
|
+
* any potentially broken HOT chains. Currently, we set this if there are any
|
1521
|
+
* RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without trying
|
1522
|
+
* very hard to detect whether they're really incompatible with the chain tip.
|
1523
|
+
* This only really makes sense for heap AM, it might need to be generalized
|
1524
|
+
* for other AMs later.
|
1525
|
+
*/
|
1526
|
+
static inline double
|
1527
|
+
table_index_build_scan(Relation table_rel,
|
1528
|
+
Relation index_rel,
|
1529
|
+
struct IndexInfo *index_info,
|
1530
|
+
bool allow_sync,
|
1531
|
+
bool progress,
|
1532
|
+
IndexBuildCallback callback,
|
1533
|
+
void *callback_state,
|
1534
|
+
TableScanDesc scan)
|
1535
|
+
{
|
1536
|
+
return table_rel->rd_tableam->index_build_range_scan(table_rel,
|
1537
|
+
index_rel,
|
1538
|
+
index_info,
|
1539
|
+
allow_sync,
|
1540
|
+
false,
|
1541
|
+
progress,
|
1542
|
+
0,
|
1543
|
+
InvalidBlockNumber,
|
1544
|
+
callback,
|
1545
|
+
callback_state,
|
1546
|
+
scan);
|
1547
|
+
}
|
1548
|
+
|
1549
|
+
/*
|
1550
|
+
* As table_index_build_scan(), except that instead of scanning the complete
|
1551
|
+
* table, only the given number of blocks are scanned. Scan to end-of-rel can
|
1552
|
+
* be signaled by passing InvalidBlockNumber as numblocks. Note that
|
1553
|
+
* restricting the range to scan cannot be done when requesting syncscan.
|
1554
|
+
*
|
1555
|
+
* When "anyvisible" mode is requested, all tuples visible to any transaction
|
1556
|
+
* are indexed and counted as live, including those inserted or deleted by
|
1557
|
+
* transactions that are still in progress.
|
1558
|
+
*/
|
1559
|
+
static inline double
|
1560
|
+
table_index_build_range_scan(Relation table_rel,
|
1561
|
+
Relation index_rel,
|
1562
|
+
struct IndexInfo *index_info,
|
1563
|
+
bool allow_sync,
|
1564
|
+
bool anyvisible,
|
1565
|
+
bool progress,
|
1566
|
+
BlockNumber start_blockno,
|
1567
|
+
BlockNumber numblocks,
|
1568
|
+
IndexBuildCallback callback,
|
1569
|
+
void *callback_state,
|
1570
|
+
TableScanDesc scan)
|
1571
|
+
{
|
1572
|
+
return table_rel->rd_tableam->index_build_range_scan(table_rel,
|
1573
|
+
index_rel,
|
1574
|
+
index_info,
|
1575
|
+
allow_sync,
|
1576
|
+
anyvisible,
|
1577
|
+
progress,
|
1578
|
+
start_blockno,
|
1579
|
+
numblocks,
|
1580
|
+
callback,
|
1581
|
+
callback_state,
|
1582
|
+
scan);
|
1583
|
+
}
|
1584
|
+
|
1585
|
+
/*
|
1586
|
+
* table_index_validate_scan - second table scan for concurrent index build
|
1587
|
+
*
|
1588
|
+
* See validate_index() for an explanation.
|
1589
|
+
*/
|
1590
|
+
static inline void
|
1591
|
+
table_index_validate_scan(Relation table_rel,
|
1592
|
+
Relation index_rel,
|
1593
|
+
struct IndexInfo *index_info,
|
1594
|
+
Snapshot snapshot,
|
1595
|
+
struct ValidateIndexState *state)
|
1596
|
+
{
|
1597
|
+
table_rel->rd_tableam->index_validate_scan(table_rel,
|
1598
|
+
index_rel,
|
1599
|
+
index_info,
|
1600
|
+
snapshot,
|
1601
|
+
state);
|
1602
|
+
}
|
1603
|
+
|
1604
|
+
|
1605
|
+
/* ----------------------------------------------------------------------------
|
1606
|
+
* Miscellaneous functionality
|
1607
|
+
* ----------------------------------------------------------------------------
|
1608
|
+
*/
|
1609
|
+
|
1610
|
+
/*
|
1611
|
+
* Return the current size of `rel` in bytes. If `forkNumber` is
|
1612
|
+
* InvalidForkNumber, return the relation's overall size, otherwise the size
|
1613
|
+
* for the indicated fork.
|
1614
|
+
*
|
1615
|
+
* Note that the overall size might not be the equivalent of the sum of sizes
|
1616
|
+
* for the individual forks for some AMs, e.g. because the AMs storage does
|
1617
|
+
* not neatly map onto the builtin types of forks.
|
1618
|
+
*/
|
1619
|
+
static inline uint64
|
1620
|
+
table_relation_size(Relation rel, ForkNumber forkNumber)
|
1621
|
+
{
|
1622
|
+
return rel->rd_tableam->relation_size(rel, forkNumber);
|
1623
|
+
}
|
1624
|
+
|
1625
|
+
/*
|
1626
|
+
* table_relation_needs_toast_table - does this relation need a toast table?
|
1627
|
+
*/
|
1628
|
+
static inline bool
|
1629
|
+
table_relation_needs_toast_table(Relation rel)
|
1630
|
+
{
|
1631
|
+
return rel->rd_tableam->relation_needs_toast_table(rel);
|
1632
|
+
}
|
1633
|
+
|
1634
|
+
/*
|
1635
|
+
* Return the OID of the AM that should be used to implement the TOAST table
|
1636
|
+
* for this relation.
|
1637
|
+
*/
|
1638
|
+
static inline Oid
|
1639
|
+
table_relation_toast_am(Relation rel)
|
1640
|
+
{
|
1641
|
+
return rel->rd_tableam->relation_toast_am(rel);
|
1642
|
+
}
|
1643
|
+
|
1644
|
+
/*
|
1645
|
+
* Fetch all or part of a TOAST value from a TOAST table.
|
1646
|
+
*
|
1647
|
+
* If this AM is never used to implement a TOAST table, then this callback
|
1648
|
+
* is not needed. But, if toasted values are ever stored in a table of this
|
1649
|
+
* type, then you will need this callback.
|
1650
|
+
*
|
1651
|
+
* toastrel is the relation in which the toasted value is stored.
|
1652
|
+
*
|
1653
|
+
* valueid identifies which toast value is to be fetched. For the heap,
|
1654
|
+
* this corresponds to the values stored in the chunk_id column.
|
1655
|
+
*
|
1656
|
+
* attrsize is the total size of the toast value to be fetched.
|
1657
|
+
*
|
1658
|
+
* sliceoffset is the offset within the toast value of the first byte that
|
1659
|
+
* should be fetched.
|
1660
|
+
*
|
1661
|
+
* slicelength is the number of bytes from the toast value that should be
|
1662
|
+
* fetched.
|
1663
|
+
*
|
1664
|
+
* result is caller-allocated space into which the fetched bytes should be
|
1665
|
+
* stored.
|
1666
|
+
*/
|
1667
|
+
static inline void
|
1668
|
+
table_relation_fetch_toast_slice(Relation toastrel, Oid valueid,
|
1669
|
+
int32 attrsize, int32 sliceoffset,
|
1670
|
+
int32 slicelength, struct varlena *result)
|
1671
|
+
{
|
1672
|
+
toastrel->rd_tableam->relation_fetch_toast_slice(toastrel, valueid,
|
1673
|
+
attrsize,
|
1674
|
+
sliceoffset, slicelength,
|
1675
|
+
result);
|
1676
|
+
}
|
1677
|
+
|
1678
|
+
|
1679
|
+
/* ----------------------------------------------------------------------------
|
1680
|
+
* Planner related functionality
|
1681
|
+
* ----------------------------------------------------------------------------
|
1682
|
+
*/
|
1683
|
+
|
1684
|
+
/*
|
1685
|
+
* Estimate the current size of the relation, as an AM specific workhorse for
|
1686
|
+
* estimate_rel_size(). Look there for an explanation of the parameters.
|
1687
|
+
*/
|
1688
|
+
static inline void
|
1689
|
+
table_relation_estimate_size(Relation rel, int32 *attr_widths,
|
1690
|
+
BlockNumber *pages, double *tuples,
|
1691
|
+
double *allvisfrac)
|
1692
|
+
{
|
1693
|
+
rel->rd_tableam->relation_estimate_size(rel, attr_widths, pages, tuples,
|
1694
|
+
allvisfrac);
|
1695
|
+
}
|
1696
|
+
|
1697
|
+
|
1698
|
+
/* ----------------------------------------------------------------------------
|
1699
|
+
* Executor related functionality
|
1700
|
+
* ----------------------------------------------------------------------------
|
1701
|
+
*/
|
1702
|
+
|
1703
|
+
/*
|
1704
|
+
* Prepare to fetch / check / return tuples from `tbmres->blockno` as part of
|
1705
|
+
* a bitmap table scan. `scan` needs to have been started via
|
1706
|
+
* table_beginscan_bm(). Returns false if there are no tuples to be found on
|
1707
|
+
* the page, true otherwise.
|
1708
|
+
*
|
1709
|
+
* Note, this is an optionally implemented function, therefore should only be
|
1710
|
+
* used after verifying the presence (at plan time or such).
|
1711
|
+
*/
|
1712
|
+
static inline bool
|
1713
|
+
table_scan_bitmap_next_block(TableScanDesc scan,
|
1714
|
+
struct TBMIterateResult *tbmres)
|
1715
|
+
{
|
1716
|
+
return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
|
1717
|
+
tbmres);
|
1718
|
+
}
|
1719
|
+
|
1720
|
+
/*
|
1721
|
+
* Fetch the next tuple of a bitmap table scan into `slot` and return true if
|
1722
|
+
* a visible tuple was found, false otherwise.
|
1723
|
+
* table_scan_bitmap_next_block() needs to previously have selected a
|
1724
|
+
* block (i.e. returned true), and no previous
|
1725
|
+
* table_scan_bitmap_next_tuple() for the same block may have
|
1726
|
+
* returned false.
|
1727
|
+
*/
|
1728
|
+
static inline bool
|
1729
|
+
table_scan_bitmap_next_tuple(TableScanDesc scan,
|
1730
|
+
struct TBMIterateResult *tbmres,
|
1731
|
+
TupleTableSlot *slot)
|
1732
|
+
{
|
1733
|
+
return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
|
1734
|
+
tbmres,
|
1735
|
+
slot);
|
1736
|
+
}
|
1737
|
+
|
1738
|
+
/*
|
1739
|
+
* Prepare to fetch tuples from the next block in a sample scan. Returns false
|
1740
|
+
* if the sample scan is finished, true otherwise. `scan` needs to have been
|
1741
|
+
* started via table_beginscan_sampling().
|
1742
|
+
*
|
1743
|
+
* This will call the TsmRoutine's NextSampleBlock() callback if necessary
|
1744
|
+
* (i.e. NextSampleBlock is not NULL), or perform a sequential scan over the
|
1745
|
+
* underlying relation.
|
1746
|
+
*/
|
1747
|
+
static inline bool
|
1748
|
+
table_scan_sample_next_block(TableScanDesc scan,
|
1749
|
+
struct SampleScanState *scanstate)
|
1750
|
+
{
|
1751
|
+
return scan->rs_rd->rd_tableam->scan_sample_next_block(scan, scanstate);
|
1752
|
+
}
|
1753
|
+
|
1754
|
+
/*
|
1755
|
+
* Fetch the next sample tuple into `slot` and return true if a visible tuple
|
1756
|
+
* was found, false otherwise. table_scan_sample_next_block() needs to
|
1757
|
+
* previously have selected a block (i.e. returned true), and no previous
|
1758
|
+
* table_scan_sample_next_tuple() for the same block may have returned false.
|
1759
|
+
*
|
1760
|
+
* This will call the TsmRoutine's NextSampleTuple() callback.
|
1761
|
+
*/
|
1762
|
+
static inline bool
|
1763
|
+
table_scan_sample_next_tuple(TableScanDesc scan,
|
1764
|
+
struct SampleScanState *scanstate,
|
1765
|
+
TupleTableSlot *slot)
|
1766
|
+
{
|
1767
|
+
return scan->rs_rd->rd_tableam->scan_sample_next_tuple(scan, scanstate,
|
1768
|
+
slot);
|
1769
|
+
}
|
1770
|
+
|
1771
|
+
|
1772
|
+
/* ----------------------------------------------------------------------------
|
1773
|
+
* Functions to make modifications a bit simpler.
|
1774
|
+
* ----------------------------------------------------------------------------
|
1775
|
+
*/
|
1776
|
+
|
1777
|
+
extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
|
1778
|
+
extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
|
1779
|
+
Snapshot snapshot);
|
1780
|
+
extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
|
1781
|
+
TupleTableSlot *slot, Snapshot snapshot,
|
1782
|
+
bool *update_indexes);
|
1783
|
+
|
1784
|
+
|
1785
|
+
/* ----------------------------------------------------------------------------
|
1786
|
+
* Helper functions to implement parallel scans for block oriented AMs.
|
1787
|
+
* ----------------------------------------------------------------------------
|
1788
|
+
*/
|
1789
|
+
|
1790
|
+
extern Size table_block_parallelscan_estimate(Relation rel);
|
1791
|
+
extern Size table_block_parallelscan_initialize(Relation rel,
|
1792
|
+
ParallelTableScanDesc pscan);
|
1793
|
+
extern void table_block_parallelscan_reinitialize(Relation rel,
|
1794
|
+
ParallelTableScanDesc pscan);
|
1795
|
+
extern BlockNumber table_block_parallelscan_nextpage(Relation rel,
|
1796
|
+
ParallelBlockTableScanDesc pbscan);
|
1797
|
+
extern void table_block_parallelscan_startblock_init(Relation rel,
|
1798
|
+
ParallelBlockTableScanDesc pbscan);
|
1799
|
+
|
1800
|
+
|
1801
|
+
/* ----------------------------------------------------------------------------
|
1802
|
+
* Helper functions to implement relation sizing for block oriented AMs.
|
1803
|
+
* ----------------------------------------------------------------------------
|
1804
|
+
*/
|
1805
|
+
|
1806
|
+
extern uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber);
|
1807
|
+
extern void table_block_relation_estimate_size(Relation rel,
|
1808
|
+
int32 *attr_widths,
|
1809
|
+
BlockNumber *pages,
|
1810
|
+
double *tuples,
|
1811
|
+
double *allvisfrac,
|
1812
|
+
Size overhead_bytes_per_tuple,
|
1813
|
+
Size usable_bytes_per_page);
|
1814
|
+
|
1815
|
+
/* ----------------------------------------------------------------------------
|
1816
|
+
* Functions in tableamapi.c
|
1817
|
+
* ----------------------------------------------------------------------------
|
1818
|
+
*/
|
1819
|
+
|
1820
|
+
extern const TableAmRoutine *GetTableAmRoutine(Oid amhandler);
|
1821
|
+
extern const TableAmRoutine *GetHeapamTableAmRoutine(void);
|
1822
|
+
extern bool check_default_table_access_method(char **newval, void **extra,
|
1823
|
+
GucSource source);
|
1824
|
+
|
1825
|
+
#endif /* TABLEAM_H */
|