pg_query 1.1.0 → 2.0.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +163 -52
- data/README.md +80 -69
- data/Rakefile +82 -1
- data/ext/pg_query/extconf.rb +3 -31
- data/ext/pg_query/guc-file.c +0 -0
- data/ext/pg_query/include/access/amapi.h +246 -0
- data/ext/pg_query/include/access/attmap.h +52 -0
- data/ext/pg_query/include/access/attnum.h +64 -0
- data/ext/pg_query/include/access/clog.h +61 -0
- data/ext/pg_query/include/access/commit_ts.h +77 -0
- data/ext/pg_query/include/access/detoast.h +92 -0
- data/ext/pg_query/include/access/genam.h +228 -0
- data/ext/pg_query/include/access/gin.h +78 -0
- data/ext/pg_query/include/access/htup.h +89 -0
- data/ext/pg_query/include/access/htup_details.h +819 -0
- data/ext/pg_query/include/access/itup.h +161 -0
- data/ext/pg_query/include/access/parallel.h +82 -0
- data/ext/pg_query/include/access/printtup.h +35 -0
- data/ext/pg_query/include/access/relation.h +28 -0
- data/ext/pg_query/include/access/relscan.h +176 -0
- data/ext/pg_query/include/access/rmgr.h +35 -0
- data/ext/pg_query/include/access/rmgrlist.h +49 -0
- data/ext/pg_query/include/access/sdir.h +58 -0
- data/ext/pg_query/include/access/skey.h +151 -0
- data/ext/pg_query/include/access/stratnum.h +83 -0
- data/ext/pg_query/include/access/sysattr.h +29 -0
- data/ext/pg_query/include/access/table.h +27 -0
- data/ext/pg_query/include/access/tableam.h +1825 -0
- data/ext/pg_query/include/access/transam.h +265 -0
- data/ext/pg_query/include/access/tupconvert.h +51 -0
- data/ext/pg_query/include/access/tupdesc.h +154 -0
- data/ext/pg_query/include/access/tupmacs.h +247 -0
- data/ext/pg_query/include/access/twophase.h +61 -0
- data/ext/pg_query/include/access/xact.h +463 -0
- data/ext/pg_query/include/access/xlog.h +398 -0
- data/ext/pg_query/include/access/xlog_internal.h +330 -0
- data/ext/pg_query/include/access/xlogdefs.h +109 -0
- data/ext/pg_query/include/access/xloginsert.h +64 -0
- data/ext/pg_query/include/access/xlogreader.h +327 -0
- data/ext/pg_query/include/access/xlogrecord.h +227 -0
- data/ext/pg_query/include/bootstrap/bootstrap.h +62 -0
- data/ext/pg_query/include/c.h +1322 -0
- data/ext/pg_query/include/catalog/catalog.h +42 -0
- data/ext/pg_query/include/catalog/catversion.h +58 -0
- data/ext/pg_query/include/catalog/dependency.h +275 -0
- data/ext/pg_query/include/catalog/genbki.h +64 -0
- data/ext/pg_query/include/catalog/index.h +199 -0
- data/ext/pg_query/include/catalog/indexing.h +366 -0
- data/ext/pg_query/include/catalog/namespace.h +188 -0
- data/ext/pg_query/include/catalog/objectaccess.h +197 -0
- data/ext/pg_query/include/catalog/objectaddress.h +84 -0
- data/ext/pg_query/include/catalog/pg_aggregate.h +176 -0
- data/ext/pg_query/include/catalog/pg_aggregate_d.h +77 -0
- data/ext/pg_query/include/catalog/pg_am.h +60 -0
- data/ext/pg_query/include/catalog/pg_am_d.h +45 -0
- data/ext/pg_query/include/catalog/pg_attribute.h +204 -0
- data/ext/pg_query/include/catalog/pg_attribute_d.h +59 -0
- data/ext/pg_query/include/catalog/pg_authid.h +58 -0
- data/ext/pg_query/include/catalog/pg_authid_d.h +49 -0
- data/ext/pg_query/include/catalog/pg_class.h +200 -0
- data/ext/pg_query/include/catalog/pg_class_d.h +103 -0
- data/ext/pg_query/include/catalog/pg_collation.h +73 -0
- data/ext/pg_query/include/catalog/pg_collation_d.h +45 -0
- data/ext/pg_query/include/catalog/pg_constraint.h +247 -0
- data/ext/pg_query/include/catalog/pg_constraint_d.h +67 -0
- data/ext/pg_query/include/catalog/pg_control.h +250 -0
- data/ext/pg_query/include/catalog/pg_conversion.h +72 -0
- data/ext/pg_query/include/catalog/pg_conversion_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_depend.h +73 -0
- data/ext/pg_query/include/catalog/pg_depend_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_event_trigger.h +51 -0
- data/ext/pg_query/include/catalog/pg_event_trigger_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_index.h +80 -0
- data/ext/pg_query/include/catalog/pg_index_d.h +56 -0
- data/ext/pg_query/include/catalog/pg_language.h +67 -0
- data/ext/pg_query/include/catalog/pg_language_d.h +39 -0
- data/ext/pg_query/include/catalog/pg_namespace.h +59 -0
- data/ext/pg_query/include/catalog/pg_namespace_d.h +34 -0
- data/ext/pg_query/include/catalog/pg_opclass.h +85 -0
- data/ext/pg_query/include/catalog/pg_opclass_d.h +49 -0
- data/ext/pg_query/include/catalog/pg_operator.h +102 -0
- data/ext/pg_query/include/catalog/pg_operator_d.h +106 -0
- data/ext/pg_query/include/catalog/pg_opfamily.h +60 -0
- data/ext/pg_query/include/catalog/pg_opfamily_d.h +47 -0
- data/ext/pg_query/include/catalog/pg_partitioned_table.h +63 -0
- data/ext/pg_query/include/catalog/pg_partitioned_table_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_proc.h +211 -0
- data/ext/pg_query/include/catalog/pg_proc_d.h +99 -0
- data/ext/pg_query/include/catalog/pg_publication.h +115 -0
- data/ext/pg_query/include/catalog/pg_publication_d.h +36 -0
- data/ext/pg_query/include/catalog/pg_replication_origin.h +57 -0
- data/ext/pg_query/include/catalog/pg_replication_origin_d.h +29 -0
- data/ext/pg_query/include/catalog/pg_statistic.h +275 -0
- data/ext/pg_query/include/catalog/pg_statistic_d.h +194 -0
- data/ext/pg_query/include/catalog/pg_statistic_ext.h +74 -0
- data/ext/pg_query/include/catalog/pg_statistic_ext_d.h +40 -0
- data/ext/pg_query/include/catalog/pg_transform.h +45 -0
- data/ext/pg_query/include/catalog/pg_transform_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_trigger.h +137 -0
- data/ext/pg_query/include/catalog/pg_trigger_d.h +106 -0
- data/ext/pg_query/include/catalog/pg_ts_config.h +50 -0
- data/ext/pg_query/include/catalog/pg_ts_config_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_ts_dict.h +54 -0
- data/ext/pg_query/include/catalog/pg_ts_dict_d.h +33 -0
- data/ext/pg_query/include/catalog/pg_ts_parser.h +57 -0
- data/ext/pg_query/include/catalog/pg_ts_parser_d.h +35 -0
- data/ext/pg_query/include/catalog/pg_ts_template.h +48 -0
- data/ext/pg_query/include/catalog/pg_ts_template_d.h +32 -0
- data/ext/pg_query/include/catalog/pg_type.h +372 -0
- data/ext/pg_query/include/catalog/pg_type_d.h +285 -0
- data/ext/pg_query/include/catalog/storage.h +48 -0
- data/ext/pg_query/include/commands/async.h +54 -0
- data/ext/pg_query/include/commands/dbcommands.h +35 -0
- data/ext/pg_query/include/commands/defrem.h +173 -0
- data/ext/pg_query/include/commands/event_trigger.h +88 -0
- data/ext/pg_query/include/commands/explain.h +127 -0
- data/ext/pg_query/include/commands/prepare.h +61 -0
- data/ext/pg_query/include/commands/tablespace.h +67 -0
- data/ext/pg_query/include/commands/trigger.h +277 -0
- data/ext/pg_query/include/commands/user.h +37 -0
- data/ext/pg_query/include/commands/vacuum.h +293 -0
- data/ext/pg_query/include/commands/variable.h +38 -0
- data/ext/pg_query/include/common/file_perm.h +56 -0
- data/ext/pg_query/include/common/hashfn.h +104 -0
- data/ext/pg_query/include/common/ip.h +37 -0
- data/ext/pg_query/include/common/keywords.h +33 -0
- data/ext/pg_query/include/common/kwlookup.h +44 -0
- data/ext/pg_query/include/common/relpath.h +90 -0
- data/ext/pg_query/include/common/string.h +19 -0
- data/ext/pg_query/include/common/unicode_combining_table.h +196 -0
- data/ext/pg_query/include/datatype/timestamp.h +197 -0
- data/ext/pg_query/include/executor/execdesc.h +70 -0
- data/ext/pg_query/include/executor/executor.h +614 -0
- data/ext/pg_query/include/executor/functions.h +41 -0
- data/ext/pg_query/include/executor/instrument.h +101 -0
- data/ext/pg_query/include/executor/spi.h +175 -0
- data/ext/pg_query/include/executor/tablefunc.h +67 -0
- data/ext/pg_query/include/executor/tuptable.h +487 -0
- data/ext/pg_query/include/fmgr.h +775 -0
- data/ext/pg_query/include/funcapi.h +348 -0
- data/ext/pg_query/include/getaddrinfo.h +162 -0
- data/ext/pg_query/include/jit/jit.h +105 -0
- data/ext/pg_query/include/kwlist_d.h +1072 -0
- data/ext/pg_query/include/lib/ilist.h +727 -0
- data/ext/pg_query/include/lib/pairingheap.h +102 -0
- data/ext/pg_query/include/lib/simplehash.h +1059 -0
- data/ext/pg_query/include/lib/stringinfo.h +161 -0
- data/ext/pg_query/include/libpq/auth.h +29 -0
- data/ext/pg_query/include/libpq/crypt.h +46 -0
- data/ext/pg_query/include/libpq/hba.h +140 -0
- data/ext/pg_query/include/libpq/libpq-be.h +326 -0
- data/ext/pg_query/include/libpq/libpq.h +133 -0
- data/ext/pg_query/include/libpq/pqcomm.h +208 -0
- data/ext/pg_query/include/libpq/pqformat.h +210 -0
- data/ext/pg_query/include/libpq/pqsignal.h +42 -0
- data/ext/pg_query/include/mb/pg_wchar.h +672 -0
- data/ext/pg_query/include/mb/stringinfo_mb.h +24 -0
- data/ext/pg_query/include/miscadmin.h +476 -0
- data/ext/pg_query/include/nodes/bitmapset.h +122 -0
- data/ext/pg_query/include/nodes/execnodes.h +2520 -0
- data/ext/pg_query/include/nodes/extensible.h +160 -0
- data/ext/pg_query/include/nodes/lockoptions.h +61 -0
- data/ext/pg_query/include/nodes/makefuncs.h +108 -0
- data/ext/pg_query/include/nodes/memnodes.h +108 -0
- data/ext/pg_query/include/nodes/nodeFuncs.h +162 -0
- data/ext/pg_query/include/nodes/nodes.h +842 -0
- data/ext/pg_query/include/nodes/params.h +170 -0
- data/ext/pg_query/include/nodes/parsenodes.h +3579 -0
- data/ext/pg_query/include/nodes/pathnodes.h +2556 -0
- data/ext/pg_query/include/nodes/pg_list.h +605 -0
- data/ext/pg_query/include/nodes/plannodes.h +1251 -0
- data/ext/pg_query/include/nodes/primnodes.h +1541 -0
- data/ext/pg_query/include/nodes/print.h +34 -0
- data/ext/pg_query/include/nodes/tidbitmap.h +75 -0
- data/ext/pg_query/include/nodes/value.h +61 -0
- data/ext/pg_query/include/optimizer/cost.h +206 -0
- data/ext/pg_query/include/optimizer/geqo.h +88 -0
- data/ext/pg_query/include/optimizer/geqo_gene.h +45 -0
- data/ext/pg_query/include/optimizer/optimizer.h +199 -0
- data/ext/pg_query/include/optimizer/paths.h +249 -0
- data/ext/pg_query/include/optimizer/planmain.h +119 -0
- data/ext/pg_query/include/parser/analyze.h +49 -0
- data/ext/pg_query/include/parser/gram.h +1067 -0
- data/ext/pg_query/include/parser/gramparse.h +75 -0
- data/ext/pg_query/include/parser/kwlist.h +477 -0
- data/ext/pg_query/include/parser/parse_agg.h +68 -0
- data/ext/pg_query/include/parser/parse_clause.h +54 -0
- data/ext/pg_query/include/parser/parse_coerce.h +97 -0
- data/ext/pg_query/include/parser/parse_collate.h +27 -0
- data/ext/pg_query/include/parser/parse_expr.h +26 -0
- data/ext/pg_query/include/parser/parse_func.h +73 -0
- data/ext/pg_query/include/parser/parse_node.h +327 -0
- data/ext/pg_query/include/parser/parse_oper.h +67 -0
- data/ext/pg_query/include/parser/parse_relation.h +123 -0
- data/ext/pg_query/include/parser/parse_target.h +46 -0
- data/ext/pg_query/include/parser/parse_type.h +60 -0
- data/ext/pg_query/include/parser/parser.h +41 -0
- data/ext/pg_query/include/parser/parsetree.h +61 -0
- data/ext/pg_query/include/parser/scanner.h +152 -0
- data/ext/pg_query/include/parser/scansup.h +30 -0
- data/ext/pg_query/include/partitioning/partdefs.h +26 -0
- data/ext/pg_query/include/pg_config.h +988 -0
- data/ext/pg_query/include/pg_config_ext.h +8 -0
- data/ext/pg_query/include/pg_config_manual.h +350 -0
- data/ext/pg_query/include/pg_config_os.h +8 -0
- data/ext/pg_query/include/pg_getopt.h +56 -0
- data/ext/pg_query/include/pg_query.h +121 -0
- data/ext/pg_query/include/pg_query_enum_defs.c +2454 -0
- data/ext/pg_query/include/pg_query_fingerprint_conds.c +875 -0
- data/ext/pg_query/include/pg_query_fingerprint_defs.c +12413 -0
- data/ext/pg_query/include/pg_query_json_helper.c +61 -0
- data/ext/pg_query/include/pg_query_outfuncs_conds.c +686 -0
- data/ext/pg_query/include/pg_query_outfuncs_defs.c +2437 -0
- data/ext/pg_query/include/pg_query_readfuncs_conds.c +222 -0
- data/ext/pg_query/include/pg_query_readfuncs_defs.c +2878 -0
- data/ext/pg_query/include/pg_trace.h +17 -0
- data/ext/pg_query/include/pgstat.h +1487 -0
- data/ext/pg_query/include/pgtime.h +84 -0
- data/ext/pg_query/include/pl_gram.h +385 -0
- data/ext/pg_query/include/pl_reserved_kwlist.h +52 -0
- data/ext/pg_query/include/pl_reserved_kwlist_d.h +114 -0
- data/ext/pg_query/include/pl_unreserved_kwlist.h +112 -0
- data/ext/pg_query/include/pl_unreserved_kwlist_d.h +246 -0
- data/ext/pg_query/include/plerrcodes.h +990 -0
- data/ext/pg_query/include/plpgsql.h +1347 -0
- data/ext/pg_query/include/port.h +524 -0
- data/ext/pg_query/include/port/atomics.h +524 -0
- data/ext/pg_query/include/port/atomics/arch-arm.h +26 -0
- data/ext/pg_query/include/port/atomics/arch-ppc.h +254 -0
- data/ext/pg_query/include/port/atomics/arch-x86.h +252 -0
- data/ext/pg_query/include/port/atomics/fallback.h +170 -0
- data/ext/pg_query/include/port/atomics/generic-gcc.h +286 -0
- data/ext/pg_query/include/port/atomics/generic.h +401 -0
- data/ext/pg_query/include/port/pg_bitutils.h +226 -0
- data/ext/pg_query/include/port/pg_bswap.h +161 -0
- data/ext/pg_query/include/port/pg_crc32c.h +101 -0
- data/ext/pg_query/include/portability/instr_time.h +256 -0
- data/ext/pg_query/include/postgres.h +764 -0
- data/ext/pg_query/include/postgres_ext.h +74 -0
- data/ext/pg_query/include/postmaster/autovacuum.h +83 -0
- data/ext/pg_query/include/postmaster/bgworker.h +161 -0
- data/ext/pg_query/include/postmaster/bgworker_internals.h +64 -0
- data/ext/pg_query/include/postmaster/bgwriter.h +45 -0
- data/ext/pg_query/include/postmaster/fork_process.h +17 -0
- data/ext/pg_query/include/postmaster/interrupt.h +32 -0
- data/ext/pg_query/include/postmaster/pgarch.h +39 -0
- data/ext/pg_query/include/postmaster/postmaster.h +77 -0
- data/ext/pg_query/include/postmaster/syslogger.h +98 -0
- data/ext/pg_query/include/postmaster/walwriter.h +21 -0
- data/ext/pg_query/include/protobuf-c.h +1106 -0
- data/ext/pg_query/include/protobuf-c/protobuf-c.h +1106 -0
- data/ext/pg_query/include/protobuf/pg_query.pb-c.h +10846 -0
- data/ext/pg_query/include/protobuf/pg_query.pb.h +124718 -0
- data/ext/pg_query/include/regex/regex.h +184 -0
- data/ext/pg_query/include/replication/logicallauncher.h +31 -0
- data/ext/pg_query/include/replication/logicalproto.h +110 -0
- data/ext/pg_query/include/replication/logicalworker.h +19 -0
- data/ext/pg_query/include/replication/origin.h +73 -0
- data/ext/pg_query/include/replication/reorderbuffer.h +467 -0
- data/ext/pg_query/include/replication/slot.h +219 -0
- data/ext/pg_query/include/replication/syncrep.h +115 -0
- data/ext/pg_query/include/replication/walreceiver.h +340 -0
- data/ext/pg_query/include/replication/walsender.h +74 -0
- data/ext/pg_query/include/rewrite/prs2lock.h +46 -0
- data/ext/pg_query/include/rewrite/rewriteHandler.h +40 -0
- data/ext/pg_query/include/rewrite/rewriteManip.h +87 -0
- data/ext/pg_query/include/rewrite/rewriteSupport.h +26 -0
- data/ext/pg_query/include/storage/backendid.h +37 -0
- data/ext/pg_query/include/storage/block.h +121 -0
- data/ext/pg_query/include/storage/buf.h +46 -0
- data/ext/pg_query/include/storage/bufmgr.h +292 -0
- data/ext/pg_query/include/storage/bufpage.h +459 -0
- data/ext/pg_query/include/storage/condition_variable.h +62 -0
- data/ext/pg_query/include/storage/dsm.h +61 -0
- data/ext/pg_query/include/storage/dsm_impl.h +75 -0
- data/ext/pg_query/include/storage/fd.h +168 -0
- data/ext/pg_query/include/storage/ipc.h +81 -0
- data/ext/pg_query/include/storage/item.h +19 -0
- data/ext/pg_query/include/storage/itemid.h +184 -0
- data/ext/pg_query/include/storage/itemptr.h +206 -0
- data/ext/pg_query/include/storage/large_object.h +100 -0
- data/ext/pg_query/include/storage/latch.h +190 -0
- data/ext/pg_query/include/storage/lmgr.h +114 -0
- data/ext/pg_query/include/storage/lock.h +612 -0
- data/ext/pg_query/include/storage/lockdefs.h +59 -0
- data/ext/pg_query/include/storage/lwlock.h +232 -0
- data/ext/pg_query/include/storage/lwlocknames.h +51 -0
- data/ext/pg_query/include/storage/off.h +57 -0
- data/ext/pg_query/include/storage/pg_sema.h +61 -0
- data/ext/pg_query/include/storage/pg_shmem.h +90 -0
- data/ext/pg_query/include/storage/pmsignal.h +94 -0
- data/ext/pg_query/include/storage/predicate.h +87 -0
- data/ext/pg_query/include/storage/proc.h +333 -0
- data/ext/pg_query/include/storage/proclist_types.h +51 -0
- data/ext/pg_query/include/storage/procsignal.h +75 -0
- data/ext/pg_query/include/storage/relfilenode.h +99 -0
- data/ext/pg_query/include/storage/s_lock.h +1047 -0
- data/ext/pg_query/include/storage/sharedfileset.h +45 -0
- data/ext/pg_query/include/storage/shm_mq.h +85 -0
- data/ext/pg_query/include/storage/shm_toc.h +58 -0
- data/ext/pg_query/include/storage/shmem.h +81 -0
- data/ext/pg_query/include/storage/sinval.h +153 -0
- data/ext/pg_query/include/storage/sinvaladt.h +43 -0
- data/ext/pg_query/include/storage/smgr.h +109 -0
- data/ext/pg_query/include/storage/spin.h +77 -0
- data/ext/pg_query/include/storage/standby.h +91 -0
- data/ext/pg_query/include/storage/standbydefs.h +74 -0
- data/ext/pg_query/include/storage/sync.h +62 -0
- data/ext/pg_query/include/tcop/cmdtag.h +58 -0
- data/ext/pg_query/include/tcop/cmdtaglist.h +217 -0
- data/ext/pg_query/include/tcop/deparse_utility.h +108 -0
- data/ext/pg_query/include/tcop/dest.h +149 -0
- data/ext/pg_query/include/tcop/fastpath.h +21 -0
- data/ext/pg_query/include/tcop/pquery.h +45 -0
- data/ext/pg_query/include/tcop/tcopprot.h +89 -0
- data/ext/pg_query/include/tcop/utility.h +108 -0
- data/ext/pg_query/include/tsearch/ts_cache.h +98 -0
- data/ext/pg_query/include/utils/acl.h +312 -0
- data/ext/pg_query/include/utils/aclchk_internal.h +45 -0
- data/ext/pg_query/include/utils/array.h +458 -0
- data/ext/pg_query/include/utils/builtins.h +127 -0
- data/ext/pg_query/include/utils/bytea.h +27 -0
- data/ext/pg_query/include/utils/catcache.h +231 -0
- data/ext/pg_query/include/utils/date.h +90 -0
- data/ext/pg_query/include/utils/datetime.h +343 -0
- data/ext/pg_query/include/utils/datum.h +68 -0
- data/ext/pg_query/include/utils/dsa.h +123 -0
- data/ext/pg_query/include/utils/dynahash.h +19 -0
- data/ext/pg_query/include/utils/elog.h +439 -0
- data/ext/pg_query/include/utils/errcodes.h +352 -0
- data/ext/pg_query/include/utils/expandeddatum.h +159 -0
- data/ext/pg_query/include/utils/expandedrecord.h +231 -0
- data/ext/pg_query/include/utils/float.h +356 -0
- data/ext/pg_query/include/utils/fmgroids.h +2657 -0
- data/ext/pg_query/include/utils/fmgrprotos.h +2646 -0
- data/ext/pg_query/include/utils/fmgrtab.h +48 -0
- data/ext/pg_query/include/utils/guc.h +443 -0
- data/ext/pg_query/include/utils/guc_tables.h +272 -0
- data/ext/pg_query/include/utils/hsearch.h +149 -0
- data/ext/pg_query/include/utils/inval.h +64 -0
- data/ext/pg_query/include/utils/lsyscache.h +197 -0
- data/ext/pg_query/include/utils/memdebug.h +82 -0
- data/ext/pg_query/include/utils/memutils.h +225 -0
- data/ext/pg_query/include/utils/numeric.h +76 -0
- data/ext/pg_query/include/utils/palloc.h +136 -0
- data/ext/pg_query/include/utils/partcache.h +102 -0
- data/ext/pg_query/include/utils/pg_locale.h +119 -0
- data/ext/pg_query/include/utils/pg_lsn.h +29 -0
- data/ext/pg_query/include/utils/pidfile.h +56 -0
- data/ext/pg_query/include/utils/plancache.h +235 -0
- data/ext/pg_query/include/utils/portal.h +241 -0
- data/ext/pg_query/include/utils/probes.h +114 -0
- data/ext/pg_query/include/utils/ps_status.h +25 -0
- data/ext/pg_query/include/utils/queryenvironment.h +74 -0
- data/ext/pg_query/include/utils/regproc.h +28 -0
- data/ext/pg_query/include/utils/rel.h +644 -0
- data/ext/pg_query/include/utils/relcache.h +151 -0
- data/ext/pg_query/include/utils/reltrigger.h +81 -0
- data/ext/pg_query/include/utils/resowner.h +86 -0
- data/ext/pg_query/include/utils/rls.h +50 -0
- data/ext/pg_query/include/utils/ruleutils.h +44 -0
- data/ext/pg_query/include/utils/sharedtuplestore.h +61 -0
- data/ext/pg_query/include/utils/snapmgr.h +158 -0
- data/ext/pg_query/include/utils/snapshot.h +206 -0
- data/ext/pg_query/include/utils/sortsupport.h +276 -0
- data/ext/pg_query/include/utils/syscache.h +219 -0
- data/ext/pg_query/include/utils/timeout.h +88 -0
- data/ext/pg_query/include/utils/timestamp.h +116 -0
- data/ext/pg_query/include/utils/tuplesort.h +277 -0
- data/ext/pg_query/include/utils/tuplestore.h +91 -0
- data/ext/pg_query/include/utils/typcache.h +202 -0
- data/ext/pg_query/include/utils/tzparser.h +39 -0
- data/ext/pg_query/include/utils/varlena.h +39 -0
- data/ext/pg_query/include/utils/xml.h +84 -0
- data/ext/pg_query/include/xxhash.h +5445 -0
- data/ext/pg_query/include/xxhash/xxhash.h +5445 -0
- data/ext/pg_query/pg_query.c +104 -0
- data/ext/pg_query/pg_query.pb-c.c +37628 -0
- data/ext/pg_query/pg_query_deparse.c +9953 -0
- data/ext/pg_query/pg_query_fingerprint.c +292 -0
- data/ext/pg_query/pg_query_fingerprint.h +8 -0
- data/ext/pg_query/pg_query_internal.h +24 -0
- data/ext/pg_query/pg_query_json_plpgsql.c +738 -0
- data/ext/pg_query/pg_query_json_plpgsql.h +9 -0
- data/ext/pg_query/pg_query_normalize.c +437 -0
- data/ext/pg_query/pg_query_outfuncs.h +10 -0
- data/ext/pg_query/pg_query_outfuncs_json.c +297 -0
- data/ext/pg_query/pg_query_outfuncs_protobuf.c +237 -0
- data/ext/pg_query/pg_query_parse.c +148 -0
- data/ext/pg_query/pg_query_parse_plpgsql.c +460 -0
- data/ext/pg_query/pg_query_readfuncs.h +11 -0
- data/ext/pg_query/pg_query_readfuncs_protobuf.c +142 -0
- data/ext/pg_query/pg_query_ruby.c +108 -12
- data/ext/pg_query/pg_query_scan.c +173 -0
- data/ext/pg_query/pg_query_split.c +221 -0
- data/ext/pg_query/protobuf-c.c +3660 -0
- data/ext/pg_query/src_backend_catalog_namespace.c +1051 -0
- data/ext/pg_query/src_backend_catalog_pg_proc.c +142 -0
- data/ext/pg_query/src_backend_commands_define.c +117 -0
- data/ext/pg_query/src_backend_libpq_pqcomm.c +651 -0
- data/ext/pg_query/src_backend_nodes_bitmapset.c +513 -0
- data/ext/pg_query/src_backend_nodes_copyfuncs.c +6013 -0
- data/ext/pg_query/src_backend_nodes_equalfuncs.c +4003 -0
- data/ext/pg_query/src_backend_nodes_extensible.c +99 -0
- data/ext/pg_query/src_backend_nodes_list.c +922 -0
- data/ext/pg_query/src_backend_nodes_makefuncs.c +417 -0
- data/ext/pg_query/src_backend_nodes_nodeFuncs.c +1363 -0
- data/ext/pg_query/src_backend_nodes_value.c +84 -0
- data/ext/pg_query/src_backend_parser_gram.c +47456 -0
- data/ext/pg_query/src_backend_parser_parse_expr.c +313 -0
- data/ext/pg_query/src_backend_parser_parser.c +497 -0
- data/ext/pg_query/src_backend_parser_scan.c +7091 -0
- data/ext/pg_query/src_backend_parser_scansup.c +160 -0
- data/ext/pg_query/src_backend_postmaster_postmaster.c +2230 -0
- data/ext/pg_query/src_backend_storage_ipc_ipc.c +192 -0
- data/ext/pg_query/src_backend_storage_lmgr_s_lock.c +370 -0
- data/ext/pg_query/src_backend_tcop_postgres.c +776 -0
- data/ext/pg_query/src_backend_utils_adt_datum.c +326 -0
- data/ext/pg_query/src_backend_utils_adt_expandeddatum.c +98 -0
- data/ext/pg_query/src_backend_utils_adt_format_type.c +136 -0
- data/ext/pg_query/src_backend_utils_adt_ruleutils.c +1683 -0
- data/ext/pg_query/src_backend_utils_error_assert.c +74 -0
- data/ext/pg_query/src_backend_utils_error_elog.c +1748 -0
- data/ext/pg_query/src_backend_utils_fmgr_fmgr.c +570 -0
- data/ext/pg_query/src_backend_utils_hash_dynahash.c +1086 -0
- data/ext/pg_query/src_backend_utils_init_globals.c +168 -0
- data/ext/pg_query/src_backend_utils_mb_mbutils.c +839 -0
- data/ext/pg_query/src_backend_utils_misc_guc.c +1831 -0
- data/ext/pg_query/src_backend_utils_mmgr_aset.c +1560 -0
- data/ext/pg_query/src_backend_utils_mmgr_mcxt.c +1006 -0
- data/ext/pg_query/src_common_encnames.c +158 -0
- data/ext/pg_query/src_common_keywords.c +39 -0
- data/ext/pg_query/src_common_kwlist_d.h +1081 -0
- data/ext/pg_query/src_common_kwlookup.c +91 -0
- data/ext/pg_query/src_common_psprintf.c +158 -0
- data/ext/pg_query/src_common_string.c +86 -0
- data/ext/pg_query/src_common_stringinfo.c +336 -0
- data/ext/pg_query/src_common_wchar.c +1651 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_comp.c +1133 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_funcs.c +877 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_gram.c +6533 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_handler.c +107 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_reserved_kwlist_d.h +123 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_scanner.c +671 -0
- data/ext/pg_query/src_pl_plpgsql_src_pl_unreserved_kwlist_d.h +255 -0
- data/ext/pg_query/src_port_erand48.c +127 -0
- data/ext/pg_query/src_port_pg_bitutils.c +246 -0
- data/ext/pg_query/src_port_pgsleep.c +69 -0
- data/ext/pg_query/src_port_pgstrcasecmp.c +83 -0
- data/ext/pg_query/src_port_qsort.c +240 -0
- data/ext/pg_query/src_port_random.c +31 -0
- data/ext/pg_query/src_port_snprintf.c +1449 -0
- data/ext/pg_query/src_port_strerror.c +324 -0
- data/ext/pg_query/src_port_strnlen.c +39 -0
- data/ext/pg_query/xxhash.c +43 -0
- data/lib/pg_query.rb +7 -4
- data/lib/pg_query/constants.rb +21 -0
- data/lib/pg_query/deparse.rb +16 -1117
- data/lib/pg_query/filter_columns.rb +86 -85
- data/lib/pg_query/fingerprint.rb +122 -87
- data/lib/pg_query/json_field_names.rb +1402 -0
- data/lib/pg_query/node.rb +31 -0
- data/lib/pg_query/param_refs.rb +42 -37
- data/lib/pg_query/parse.rb +220 -200
- data/lib/pg_query/parse_error.rb +1 -1
- data/lib/pg_query/pg_query_pb.rb +3211 -0
- data/lib/pg_query/scan.rb +23 -0
- data/lib/pg_query/treewalker.rb +24 -40
- data/lib/pg_query/truncate.rb +64 -43
- data/lib/pg_query/version.rb +2 -2
- metadata +473 -11
- data/ext/pg_query/pg_query_ruby.h +0 -10
- data/lib/pg_query/deep_dup.rb +0 -16
- data/lib/pg_query/deparse/alter_table.rb +0 -42
- data/lib/pg_query/deparse/interval.rb +0 -105
- data/lib/pg_query/legacy_parsetree.rb +0 -109
- data/lib/pg_query/node_types.rb +0 -284
data/ext/pg_query/include/storage/proclist_types.h
@@ -0,0 +1,51 @@
+/*-------------------------------------------------------------------------
+ *
+ * proclist_types.h
+ *		doubly-linked lists of pgprocnos
+ *
+ * See proclist.h for functions that operate on these types.
+ *
+ * Portions Copyright (c) 2016-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *		src/include/storage/proclist_types.h
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef PROCLIST_TYPES_H
+#define PROCLIST_TYPES_H
+
+/*
+ * A node in a doubly-linked list of processes. The link fields contain
+ * the 0-based PGPROC indexes of the next and previous process, or
+ * INVALID_PGPROCNO in the next-link of the last node and the prev-link
+ * of the first node. A node that is currently not in any list
+ * should have next == prev == 0; this is not a possible state for a node
+ * that is in a list, because we disallow circularity.
+ */
+typedef struct proclist_node
+{
+    int         next;           /* pgprocno of the next PGPROC */
+    int         prev;           /* pgprocno of the prev PGPROC */
+} proclist_node;
+
+/*
+ * Header of a doubly-linked list of PGPROCs, identified by pgprocno.
+ * An empty list is represented by head == tail == INVALID_PGPROCNO.
+ */
+typedef struct proclist_head
+{
+    int         head;           /* pgprocno of the head PGPROC */
+    int         tail;           /* pgprocno of the tail PGPROC */
+} proclist_head;
+
+/*
+ * List iterator allowing some modifications while iterating.
+ */
+typedef struct proclist_mutable_iter
+{
+    int         cur;            /* pgprocno of the current PGPROC */
+    int         next;           /* pgprocno of the next PGPROC */
+} proclist_mutable_iter;
+
+#endif                          /* PROCLIST_TYPES_H */
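The structs above only define the layout; the list operations live in proclist.h. As a quick illustration of the convention described in the comments (a sketch only, not part of the diff; INVALID_PGPROCNO is defined in storage/proc.h, which the gem also vendors), an emptiness check would look roughly like this:

#include "postgres.h"
#include "storage/proc.h"               /* INVALID_PGPROCNO */
#include "storage/proclist_types.h"

/* Sketch: per the comment above, an empty list has
 * head == tail == INVALID_PGPROCNO. */
static inline bool
proclist_head_is_empty(const proclist_head *list)
{
    return list->head == INVALID_PGPROCNO;
}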
data/ext/pg_query/include/storage/procsignal.h
@@ -0,0 +1,75 @@
+/*-------------------------------------------------------------------------
+ *
+ * procsignal.h
+ *	  Routines for interprocess signaling
+ *
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/procsignal.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PROCSIGNAL_H
+#define PROCSIGNAL_H
+
+#include "storage/backendid.h"
+
+
+/*
+ * Reasons for signaling a Postgres child process (a backend or an auxiliary
+ * process, like checkpointer).  We can cope with concurrent signals for different
+ * reasons.  However, if the same reason is signaled multiple times in quick
+ * succession, the process is likely to observe only one notification of it.
+ * This is okay for the present uses.
+ *
+ * Also, because of race conditions, it's important that all the signals be
+ * defined so that no harm is done if a process mistakenly receives one.
+ */
+typedef enum
+{
+    PROCSIG_CATCHUP_INTERRUPT,      /* sinval catchup interrupt */
+    PROCSIG_NOTIFY_INTERRUPT,       /* listen/notify interrupt */
+    PROCSIG_PARALLEL_MESSAGE,       /* message from cooperating parallel backend */
+    PROCSIG_WALSND_INIT_STOPPING,   /* ask walsenders to prepare for shutdown */
+    PROCSIG_BARRIER,                /* global barrier interrupt */
+
+    /* Recovery conflict reasons */
+    PROCSIG_RECOVERY_CONFLICT_DATABASE,
+    PROCSIG_RECOVERY_CONFLICT_TABLESPACE,
+    PROCSIG_RECOVERY_CONFLICT_LOCK,
+    PROCSIG_RECOVERY_CONFLICT_SNAPSHOT,
+    PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
+    PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK,
+
+    NUM_PROCSIGNALS                 /* Must be last! */
+} ProcSignalReason;
+
+typedef enum
+{
+    /*
+     * XXX. PROCSIGNAL_BARRIER_PLACEHOLDER should be replaced when the first
+     * real user of the ProcSignalBarrier mechanism is added. It's just here
+     * for now because we can't have an empty enum.
+     */
+    PROCSIGNAL_BARRIER_PLACEHOLDER = 0
+} ProcSignalBarrierType;
+
+/*
+ * prototypes for functions in procsignal.c
+ */
+extern Size ProcSignalShmemSize(void);
+extern void ProcSignalShmemInit(void);
+
+extern void ProcSignalInit(int pss_idx);
+extern int	SendProcSignal(pid_t pid, ProcSignalReason reason,
+						   BackendId backendId);
+
+extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type);
+extern void WaitForProcSignalBarrier(uint64 generation);
+extern void ProcessProcSignalBarrier(void);
+
+extern void procsignal_sigusr1_handler(SIGNAL_ARGS);
+
+#endif                          /* PROCSIGNAL_H */
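For context on how the prototypes above are used, here is a minimal sketch (not part of the diff). It assumes the usual convention that SendProcSignal() returns 0 on success and a negative value on failure, like kill(); the pid and BackendId are placeholders that a real caller would obtain from the ProcArray.

#include "postgres.h"
#include "storage/procsignal.h"

/* Sketch: ask another backend to re-check for a snapshot recovery conflict. */
static void
signal_snapshot_conflict(pid_t pid, BackendId backend_id)
{
    if (SendProcSignal(pid, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT, backend_id) < 0)
        elog(WARNING, "could not signal backend with PID %d", (int) pid);
}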
data/ext/pg_query/include/storage/relfilenode.h
@@ -0,0 +1,99 @@
+/*-------------------------------------------------------------------------
+ *
+ * relfilenode.h
+ *	  Physical access information for relations.
+ *
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/relfilenode.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef RELFILENODE_H
+#define RELFILENODE_H
+
+#include "common/relpath.h"
+#include "storage/backendid.h"
+
+/*
+ * RelFileNode must provide all that we need to know to physically access
+ * a relation, with the exception of the backend ID, which can be provided
+ * separately. Note, however, that a "physical" relation is comprised of
+ * multiple files on the filesystem, as each fork is stored as a separate
+ * file, and each fork can be divided into multiple segments. See md.c.
+ *
+ * spcNode identifies the tablespace of the relation.  It corresponds to
+ * pg_tablespace.oid.
+ *
+ * dbNode identifies the database of the relation.  It is zero for
+ * "shared" relations (those common to all databases of a cluster).
+ * Nonzero dbNode values correspond to pg_database.oid.
+ *
+ * relNode identifies the specific relation.  relNode corresponds to
+ * pg_class.relfilenode (NOT pg_class.oid, because we need to be able
+ * to assign new physical files to relations in some situations).
+ * Notice that relNode is only unique within a database in a particular
+ * tablespace.
+ *
+ * Note: spcNode must be GLOBALTABLESPACE_OID if and only if dbNode is
+ * zero.  We support shared relations only in the "global" tablespace.
+ *
+ * Note: in pg_class we allow reltablespace == 0 to denote that the
+ * relation is stored in its database's "default" tablespace (as
+ * identified by pg_database.dattablespace).  However this shorthand
+ * is NOT allowed in RelFileNode structs --- the real tablespace ID
+ * must be supplied when setting spcNode.
+ *
+ * Note: in pg_class, relfilenode can be zero to denote that the relation
+ * is a "mapped" relation, whose current true filenode number is available
+ * from relmapper.c.  Again, this case is NOT allowed in RelFileNodes.
+ *
+ * Note: various places use RelFileNode in hashtable keys.  Therefore,
+ * there *must not* be any unused padding bytes in this struct.  That
+ * should be safe as long as all the fields are of type Oid.
+ */
+typedef struct RelFileNode
+{
+    Oid         spcNode;        /* tablespace */
+    Oid         dbNode;         /* database */
+    Oid         relNode;        /* relation */
+} RelFileNode;
+
+/*
+ * Augmenting a relfilenode with the backend ID provides all the information
+ * we need to locate the physical storage.  The backend ID is InvalidBackendId
+ * for regular relations (those accessible to more than one backend), or the
+ * owning backend's ID for backend-local relations.  Backend-local relations
+ * are always transient and removed in case of a database crash; they are
+ * never WAL-logged or fsync'd.
+ */
+typedef struct RelFileNodeBackend
+{
+    RelFileNode node;
+    BackendId   backend;
+} RelFileNodeBackend;
+
+#define RelFileNodeBackendIsTemp(rnode) \
+	((rnode).backend != InvalidBackendId)
+
+/*
+ * Note: RelFileNodeEquals and RelFileNodeBackendEquals compare relNode first
+ * since that is most likely to be different in two unequal RelFileNodes.  It
+ * is probably redundant to compare spcNode if the other fields are found equal,
+ * but do it anyway to be sure.  Likewise for checking the backend ID in
+ * RelFileNodeBackendEquals.
+ */
+#define RelFileNodeEquals(node1, node2) \
+	((node1).relNode == (node2).relNode && \
+	 (node1).dbNode == (node2).dbNode && \
+	 (node1).spcNode == (node2).spcNode)
+
+#define RelFileNodeBackendEquals(node1, node2) \
+	((node1).node.relNode == (node2).node.relNode && \
+	 (node1).node.dbNode == (node2).node.dbNode && \
+	 (node1).backend == (node2).backend && \
+	 (node1).node.spcNode == (node2).node.spcNode)
+
+#endif                          /* RELFILENODE_H */
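A small usage sketch (not part of the diff; the OID parameters are placeholders) showing how the struct and comparison macro above fit together:

#include "postgres.h"
#include "storage/relfilenode.h"

/* Sketch: build a RelFileNode from catalog values and compare it to another.
 * As the comments above spell out, spcNode must be a real tablespace OID
 * (never 0), dbNode is 0 only for shared relations, and relNode is
 * pg_class.relfilenode. */
static bool
same_physical_relation(Oid tablespace_oid, Oid database_oid,
                       Oid relfilenode_oid, RelFileNode other)
{
    RelFileNode node;

    node.spcNode = tablespace_oid;
    node.dbNode = database_oid;
    node.relNode = relfilenode_oid;

    /* relNode is compared first, since it is the most likely field to differ */
    return RelFileNodeEquals(node, other);
}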
@@ -0,0 +1,1047 @@
|
|
1
|
+
/*-------------------------------------------------------------------------
|
2
|
+
*
|
3
|
+
* s_lock.h
|
4
|
+
* Hardware-dependent implementation of spinlocks.
|
5
|
+
*
|
6
|
+
* NOTE: none of the macros in this file are intended to be called directly.
|
7
|
+
* Call them through the hardware-independent macros in spin.h.
|
8
|
+
*
|
9
|
+
* The following hardware-dependent macros must be provided for each
|
10
|
+
* supported platform:
|
11
|
+
*
|
12
|
+
* void S_INIT_LOCK(slock_t *lock)
|
13
|
+
* Initialize a spinlock (to the unlocked state).
|
14
|
+
*
|
15
|
+
* int S_LOCK(slock_t *lock)
|
16
|
+
* Acquire a spinlock, waiting if necessary.
|
17
|
+
* Time out and abort() if unable to acquire the lock in a
|
18
|
+
* "reasonable" amount of time --- typically ~ 1 minute.
|
19
|
+
* Should return number of "delays"; see s_lock.c
|
20
|
+
*
|
21
|
+
* void S_UNLOCK(slock_t *lock)
|
22
|
+
* Unlock a previously acquired lock.
|
23
|
+
*
|
24
|
+
* bool S_LOCK_FREE(slock_t *lock)
|
25
|
+
* Tests if the lock is free. Returns true if free, false if locked.
|
26
|
+
* This does *not* change the state of the lock.
|
27
|
+
*
|
28
|
+
* void SPIN_DELAY(void)
|
29
|
+
* Delay operation to occur inside spinlock wait loop.
|
30
|
+
*
|
31
|
+
* Note to implementors: there are default implementations for all these
|
32
|
+
* macros at the bottom of the file. Check if your platform can use
|
33
|
+
* these or needs to override them.
|
34
|
+
*
|
35
|
+
* Usually, S_LOCK() is implemented in terms of even lower-level macros
|
36
|
+
* TAS() and TAS_SPIN():
|
37
|
+
*
|
38
|
+
* int TAS(slock_t *lock)
|
39
|
+
* Atomic test-and-set instruction. Attempt to acquire the lock,
|
40
|
+
* but do *not* wait. Returns 0 if successful, nonzero if unable
|
41
|
+
* to acquire the lock.
|
42
|
+
*
|
43
|
+
* int TAS_SPIN(slock_t *lock)
|
44
|
+
* Like TAS(), but this version is used when waiting for a lock
|
45
|
+
* previously found to be contended. By default, this is the
|
46
|
+
* same as TAS(), but on some architectures it's better to poll a
|
47
|
+
* contended lock using an unlocked instruction and retry the
|
48
|
+
* atomic test-and-set only when it appears free.
|
49
|
+
*
|
50
|
+
* TAS() and TAS_SPIN() are NOT part of the API, and should never be called
|
51
|
+
* directly.
|
52
|
+
*
|
53
|
+
* CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
|
54
|
+
* failure to acquire a lock even when the lock is not locked. For example,
|
55
|
+
* on Alpha TAS() will "fail" if interrupted. Therefore a retry loop must
|
56
|
+
* always be used, even if you are certain the lock is free.
|
57
|
+
*
|
58
|
+
* It is the responsibility of these macros to make sure that the compiler
|
59
|
+
* does not re-order accesses to shared memory to precede the actual lock
|
60
|
+
* acquisition, or follow the lock release. Prior to PostgreSQL 9.5, this
|
61
|
+
* was the caller's responsibility, which meant that callers had to use
|
62
|
+
* volatile-qualified pointers to refer to both the spinlock itself and the
|
63
|
+
* shared data being accessed within the spinlocked critical section. This
|
64
|
+
* was notationally awkward, easy to forget (and thus error-prone), and
|
65
|
+
* prevented some useful compiler optimizations. For these reasons, we
|
66
|
+
* now require that the macros themselves prevent compiler re-ordering,
|
67
|
+
* so that the caller doesn't need to take special precautions.
|
68
|
+
*
|
69
|
+
* On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
|
70
|
+
* S_UNLOCK() macros must further include hardware-level memory fence
|
71
|
+
* instructions to prevent similar re-ordering at the hardware level.
|
72
|
+
* TAS() and TAS_SPIN() must guarantee that loads and stores issued after
|
73
|
+
* the macro are not executed until the lock has been obtained. Conversely,
|
74
|
+
* S_UNLOCK() must guarantee that loads and stores issued before the macro
|
75
|
+
* have been executed before the lock is released.
|
76
|
+
*
|
77
|
+
* On most supported platforms, TAS() uses a tas() function written
|
78
|
+
* in assembly language to execute a hardware atomic-test-and-set
|
79
|
+
* instruction. Equivalent OS-supplied mutex routines could be used too.
|
80
|
+
*
|
81
|
+
* If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
|
82
|
+
* defined), then we fall back on an emulation that uses SysV semaphores
|
83
|
+
* (see spin.c). This emulation will be MUCH MUCH slower than a proper TAS()
|
84
|
+
* implementation, because of the cost of a kernel call per lock or unlock.
|
85
|
+
* An old report is that Postgres spends around 40% of its time in semop(2)
|
86
|
+
* when using the SysV semaphore code.
|
87
|
+
*
|
88
|
+
*
|
89
|
+
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
|
90
|
+
* Portions Copyright (c) 1994, Regents of the University of California
|
91
|
+
*
|
92
|
+
* src/include/storage/s_lock.h
|
93
|
+
*
|
94
|
+
*-------------------------------------------------------------------------
|
95
|
+
*/
|
96
|
+
#ifndef S_LOCK_H
|
97
|
+
#define S_LOCK_H
|
98
|
+
|
99
|
+
#ifdef FRONTEND
|
100
|
+
#error "s_lock.h may not be included from frontend code"
|
101
|
+
#endif
|
102
|
+
|
103
|
+
#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
|
104
|
+
|
105
|
+
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
|
106
|
+
/*************************************************************************
|
107
|
+
* All the gcc inlines
|
108
|
+
* Gcc consistently defines the CPU as __cpu__.
|
109
|
+
* Other compilers use __cpu or __cpu__ so we test for both in those cases.
|
110
|
+
*/
|
111
|
+
|
112
|
+
/*----------
|
113
|
+
* Standard gcc asm format (assuming "volatile slock_t *lock"):
|
114
|
+
|
115
|
+
__asm__ __volatile__(
|
116
|
+
" instruction \n"
|
117
|
+
" instruction \n"
|
118
|
+
" instruction \n"
|
119
|
+
: "=r"(_res), "+m"(*lock) // return register, in/out lock value
|
120
|
+
: "r"(lock) // lock pointer, in input register
|
121
|
+
: "memory", "cc"); // show clobbered registers here
|
122
|
+
|
123
|
+
* The output-operands list (after first colon) should always include
|
124
|
+
* "+m"(*lock), whether or not the asm code actually refers to this
|
125
|
+
* operand directly. This ensures that gcc believes the value in the
|
126
|
+
* lock variable is used and set by the asm code. Also, the clobbers
|
127
|
+
* list (after third colon) should always include "memory"; this prevents
|
128
|
+
* gcc from thinking it can cache the values of shared-memory fields
|
129
|
+
* across the asm code. Add "cc" if your asm code changes the condition
|
130
|
+
* code register, and also list any temp registers the code uses.
|
131
|
+
*----------
|
132
|
+
*/
|
133
|
+
|
134
|
+
|
135
|
+
#ifdef __i386__ /* 32-bit i386 */
|
136
|
+
#define HAS_TEST_AND_SET
|
137
|
+
|
138
|
+
typedef unsigned char slock_t;
|
139
|
+
|
140
|
+
#define TAS(lock) tas(lock)
|
141
|
+
|
142
|
+
static __inline__ int
|
143
|
+
tas(volatile slock_t *lock)
|
144
|
+
{
|
145
|
+
register slock_t _res = 1;
|
146
|
+
|
147
|
+
/*
|
148
|
+
* Use a non-locking test before asserting the bus lock. Note that the
|
149
|
+
* extra test appears to be a small loss on some x86 platforms and a small
|
150
|
+
* win on others; it's by no means clear that we should keep it.
|
151
|
+
*
|
152
|
+
* When this was last tested, we didn't have separate TAS() and TAS_SPIN()
|
153
|
+
* macros. Nowadays it probably would be better to do a non-locking test
|
154
|
+
* in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
|
155
|
+
* testing to verify that. Without some empirical evidence, better to
|
156
|
+
* leave it alone.
|
157
|
+
*/
|
158
|
+
__asm__ __volatile__(
|
159
|
+
" cmpb $0,%1 \n"
|
160
|
+
" jne 1f \n"
|
161
|
+
" lock \n"
|
162
|
+
" xchgb %0,%1 \n"
|
163
|
+
"1: \n"
|
164
|
+
: "+q"(_res), "+m"(*lock)
|
165
|
+
: /* no inputs */
|
166
|
+
: "memory", "cc");
|
167
|
+
return (int) _res;
|
168
|
+
}
|
169
|
+
|
170
|
+
#define SPIN_DELAY() spin_delay()
|
171
|
+
|
172
|
+
static __inline__ void
|
173
|
+
spin_delay(void)
|
174
|
+
{
|
175
|
+
/*
|
176
|
+
* This sequence is equivalent to the PAUSE instruction ("rep" is
|
177
|
+
* ignored by old IA32 processors if the following instruction is
|
178
|
+
* not a string operation); the IA-32 Architecture Software
|
179
|
+
* Developer's Manual, Vol. 3, Section 7.7.2 describes why using
|
180
|
+
* PAUSE in the inner loop of a spin lock is necessary for good
|
181
|
+
* performance:
|
182
|
+
*
|
183
|
+
* The PAUSE instruction improves the performance of IA-32
|
184
|
+
* processors supporting Hyper-Threading Technology when
|
185
|
+
* executing spin-wait loops and other routines where one
|
186
|
+
* thread is accessing a shared lock or semaphore in a tight
|
187
|
+
* polling loop. When executing a spin-wait loop, the
|
188
|
+
* processor can suffer a severe performance penalty when
|
189
|
+
* exiting the loop because it detects a possible memory order
|
190
|
+
* violation and flushes the core processor's pipeline. The
|
191
|
+
* PAUSE instruction provides a hint to the processor that the
|
192
|
+
* code sequence is a spin-wait loop. The processor uses this
|
193
|
+
* hint to avoid the memory order violation and prevent the
|
194
|
+
* pipeline flush. In addition, the PAUSE instruction
|
195
|
+
* de-pipelines the spin-wait loop to prevent it from
|
196
|
+
* consuming execution resources excessively.
|
197
|
+
*/
|
198
|
+
__asm__ __volatile__(
|
199
|
+
" rep; nop \n");
|
200
|
+
}
|
201
|
+
|
202
|
+
#endif /* __i386__ */
|
203
|
+
|
204
|
+
|
205
|
+
#ifdef __x86_64__ /* AMD Opteron, Intel EM64T */
|
206
|
+
#define HAS_TEST_AND_SET
|
207
|
+
|
208
|
+
typedef unsigned char slock_t;
|
209
|
+
|
210
|
+
#define TAS(lock) tas(lock)
|
211
|
+
|
212
|
+
/*
|
213
|
+
* On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
|
214
|
+
* but only when spinning.
|
215
|
+
*
|
216
|
+
* See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
|
217
|
+
* and IA32, by Michael Chynoweth and Mary R. Lee. As of this writing, it is
|
218
|
+
* available at:
|
219
|
+
* http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
|
220
|
+
*/
|
221
|
+
#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
|
222
|
+
|
223
|
+
static __inline__ int
|
224
|
+
tas(volatile slock_t *lock)
|
225
|
+
{
|
226
|
+
register slock_t _res = 1;
|
227
|
+
|
228
|
+
__asm__ __volatile__(
|
229
|
+
" lock \n"
|
230
|
+
" xchgb %0,%1 \n"
|
231
|
+
: "+q"(_res), "+m"(*lock)
|
232
|
+
: /* no inputs */
|
233
|
+
: "memory", "cc");
|
234
|
+
return (int) _res;
|
235
|
+
}
|
236
|
+
|
237
|
+
#define SPIN_DELAY() spin_delay()
|
238
|
+
|
239
|
+
static __inline__ void
|
240
|
+
spin_delay(void)
|
241
|
+
{
|
242
|
+
/*
|
243
|
+
* Adding a PAUSE in the spin delay loop is demonstrably a no-op on
|
244
|
+
* Opteron, but it may be of some use on EM64T, so we keep it.
|
245
|
+
*/
|
246
|
+
__asm__ __volatile__(
|
247
|
+
" rep; nop \n");
|
248
|
+
}
|
249
|
+
|
250
|
+
#endif /* __x86_64__ */
|
251
|
+
|
252
|
+
|
253
|
+
#if defined(__ia64__) || defined(__ia64)
|
254
|
+
/*
|
255
|
+
* Intel Itanium, gcc or Intel's compiler.
|
256
|
+
*
|
257
|
+
* Itanium has weak memory ordering, but we rely on the compiler to enforce
|
258
|
+
* strict ordering of accesses to volatile data. In particular, while the
|
259
|
+
* xchg instruction implicitly acts as a memory barrier with 'acquire'
|
260
|
+
* semantics, we do not have an explicit memory fence instruction in the
|
261
|
+
* S_UNLOCK macro. We use a regular assignment to clear the spinlock, and
|
262
|
+
* trust that the compiler marks the generated store instruction with the
|
263
|
+
* ".rel" opcode.
|
264
|
+
*
|
265
|
+
* Testing shows that assumption to hold on gcc, although I could not find
|
266
|
+
* any explicit statement on that in the gcc manual. In Intel's compiler,
|
267
|
+
* the -m[no-]serialize-volatile option controls that, and testing shows that
|
268
|
+
* it is enabled by default.
|
269
|
+
*
|
270
|
+
* While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
|
271
|
+
* (at least not in icc versions before 12.x). So we have to carry a separate
|
272
|
+
* compiler-intrinsic-based implementation for it.
|
273
|
+
*/
|
274
|
+
#define HAS_TEST_AND_SET
|
275
|
+
|
276
|
+
typedef unsigned int slock_t;
|
277
|
+
|
278
|
+
#define TAS(lock) tas(lock)
|
279
|
+
|
280
|
+
/* On IA64, it's a win to use a non-locking test before the xchg proper */
|
281
|
+
#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
|
282
|
+
|
283
|
+
#ifndef __INTEL_COMPILER
|
284
|
+
|
285
|
+
static __inline__ int
|
286
|
+
tas(volatile slock_t *lock)
|
287
|
+
{
|
288
|
+
long int ret;
|
289
|
+
|
290
|
+
__asm__ __volatile__(
|
291
|
+
" xchg4 %0=%1,%2 \n"
|
292
|
+
: "=r"(ret), "+m"(*lock)
|
293
|
+
: "r"(1)
|
294
|
+
: "memory");
|
295
|
+
return (int) ret;
|
296
|
+
}
|
297
|
+
|
298
|
+
#else /* __INTEL_COMPILER */
|
299
|
+
|
300
|
+
static __inline__ int
|
301
|
+
tas(volatile slock_t *lock)
|
302
|
+
{
|
303
|
+
int ret;
|
304
|
+
|
305
|
+
ret = _InterlockedExchange(lock,1); /* this is a xchg asm macro */
|
306
|
+
|
307
|
+
return ret;
|
308
|
+
}
|
309
|
+
|
310
|
+
/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
|
311
|
+
#define S_UNLOCK(lock) \
|
312
|
+
do { __memory_barrier(); *(lock) = 0; } while (0)
|
313
|
+
|
314
|
+
#endif /* __INTEL_COMPILER */
|
315
|
+
#endif /* __ia64__ || __ia64 */
|
316
|
+
|
317
|
+
/*
|
318
|
+
* On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
|
319
|
+
*
|
320
|
+
* We use the int-width variant of the builtin because it works on more chips
|
321
|
+
* than other widths.
|
322
|
+
*/
|
323
|
+
#if defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(__aarch64)
|
324
|
+
#ifdef HAVE_GCC__SYNC_INT32_TAS
|
325
|
+
#define HAS_TEST_AND_SET
|
326
|
+
|
327
|
+
#define TAS(lock) tas(lock)
|
328
|
+
|
329
|
+
typedef int slock_t;
|
330
|
+
|
331
|
+
static __inline__ int
|
332
|
+
tas(volatile slock_t *lock)
|
333
|
+
{
|
334
|
+
return __sync_lock_test_and_set(lock, 1);
|
335
|
+
}
|
336
|
+
|
337
|
+
#define S_UNLOCK(lock) __sync_lock_release(lock)
|
338
|
+
|
339
|
+
#endif /* HAVE_GCC__SYNC_INT32_TAS */
|
340
|
+
#endif /* __arm__ || __arm || __aarch64__ || __aarch64 */
|
341
|
+
|
342
|
+
|
343
|
+
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
|
344
|
+
#if defined(__s390__) || defined(__s390x__)
|
345
|
+
#define HAS_TEST_AND_SET
|
346
|
+
|
347
|
+
typedef unsigned int slock_t;
|
348
|
+
|
349
|
+
#define TAS(lock) tas(lock)
|
350
|
+
|
351
|
+
static __inline__ int
|
352
|
+
tas(volatile slock_t *lock)
|
353
|
+
{
|
354
|
+
int _res = 0;
|
355
|
+
|
356
|
+
__asm__ __volatile__(
|
357
|
+
" cs %0,%3,0(%2) \n"
|
358
|
+
: "+d"(_res), "+m"(*lock)
|
359
|
+
: "a"(lock), "d"(1)
|
360
|
+
: "memory", "cc");
|
361
|
+
return _res;
|
362
|
+
}
|
363
|
+
|
364
|
+
#endif /* __s390__ || __s390x__ */
|
365
|
+
|
366
|
+
|
367
|
+
#if defined(__sparc__) /* Sparc */
|
368
|
+
/*
|
369
|
+
* Solaris has always run sparc processors in TSO (total store) mode, but
|
370
|
+
* linux didn't use to and the *BSDs still don't. So, be careful about
|
371
|
+
* acquire/release semantics. The CPU will treat superfluous membars as
|
372
|
+
* NOPs, so it's just code space.
|
373
|
+
*/
|
374
|
+
#define HAS_TEST_AND_SET
|
375
|
+
|
376
|
+
typedef unsigned char slock_t;
|
377
|
+
|
378
|
+
#define TAS(lock) tas(lock)
|
379
|
+
|
380
|
+
static __inline__ int
|
381
|
+
tas(volatile slock_t *lock)
|
382
|
+
{
|
383
|
+
register slock_t _res;
|
384
|
+
|
385
|
+
/*
|
386
|
+
* See comment in src/backend/port/tas/sunstudio_sparc.s for why this
|
387
|
+
* uses "ldstub", and that file uses "cas". gcc currently generates
|
388
|
+
* sparcv7-targeted binaries, so "cas" use isn't possible.
|
389
|
+
*/
|
390
|
+
__asm__ __volatile__(
|
391
|
+
" ldstub [%2], %0 \n"
|
392
|
+
: "=r"(_res), "+m"(*lock)
|
393
|
+
: "r"(lock)
|
394
|
+
: "memory");
|
395
|
+
#if defined(__sparcv7) || defined(__sparc_v7__)
|
396
|
+
/*
|
397
|
+
* No stbar or membar available, luckily no actually produced hardware
|
398
|
+
* requires a barrier.
|
399
|
+
*/
|
400
|
+
#elif defined(__sparcv8) || defined(__sparc_v8__)
|
401
|
+
/* stbar is available (and required for both PSO, RMO), membar isn't */
|
402
|
+
__asm__ __volatile__ ("stbar \n":::"memory");
|
403
|
+
#else
|
404
|
+
/*
|
405
|
+
* #LoadStore (RMO) | #LoadLoad (RMO) together are the appropriate acquire
|
406
|
+
* barrier for sparcv8+ upwards.
|
407
|
+
*/
|
408
|
+
__asm__ __volatile__ ("membar #LoadStore | #LoadLoad \n":::"memory");
|
409
|
+
#endif
|
410
|
+
return (int) _res;
|
411
|
+
}
|
412
|
+
|
413
|
+
#if defined(__sparcv7) || defined(__sparc_v7__)
|
414
|
+
/*
|
415
|
+
* No stbar or membar available, luckily no actually produced hardware
|
416
|
+
* requires a barrier. We fall through to the default gcc definition of
|
417
|
+
* S_UNLOCK in this case.
|
418
|
+
*/
|
419
|
+
#elif defined(__sparcv8) || defined(__sparc_v8__)
|
420
|
+
/* stbar is available (and required for both PSO, RMO), membar isn't */
|
421
|
+
#define S_UNLOCK(lock) \
|
422
|
+
do \
|
423
|
+
{ \
|
424
|
+
__asm__ __volatile__ ("stbar \n":::"memory"); \
|
425
|
+
*((volatile slock_t *) (lock)) = 0; \
|
426
|
+
} while (0)
|
427
|
+
#else
|
428
|
+
/*
|
429
|
+
* #LoadStore (RMO) | #StoreStore (RMO, PSO) together are the appropriate
|
430
|
+
* release barrier for sparcv8+ upwards.
|
431
|
+
*/
|
432
|
+
#define S_UNLOCK(lock) \
|
433
|
+
do \
|
434
|
+
{ \
|
435
|
+
__asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \
|
436
|
+
*((volatile slock_t *) (lock)) = 0; \
|
437
|
+
} while (0)
|
438
|
+
#endif
|
439
|
+
|
440
|
+
#endif /* __sparc__ */
|
441
|
+
|
442
|
+
|
443
|
+
/* PowerPC */
|
444
|
+
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
|
445
|
+
#define HAS_TEST_AND_SET
|
446
|
+
|
447
|
+
typedef unsigned int slock_t;
|
448
|
+
|
449
|
+
#define TAS(lock) tas(lock)
|
450
|
+
|
451
|
+
/* On PPC, it's a win to use a non-locking test before the lwarx */
|
452
|
+
#define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock))
|
453
|
+
|
454
|
+
/*
|
455
|
+
* The second operand of addi can hold a constant zero or a register number,
|
456
|
+
* hence constraint "=&b" to avoid allocating r0. "b" stands for "address
|
457
|
+
* base register"; most operands having this register-or-zero property are
|
458
|
+
* address bases, e.g. the second operand of lwax.
|
459
|
+
*
|
460
|
+
* NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
|
461
|
+
* an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
|
462
|
+
* On newer machines, we can use lwsync instead for better performance.
|
463
|
+
*
|
464
|
+
* Ordinarily, we'd code the branches here using GNU-style local symbols, that
|
465
|
+
* is "1f" referencing "1:" and so on. But some people run gcc on AIX with
|
466
|
+
* IBM's assembler as backend, and IBM's assembler doesn't do local symbols.
|
467
|
+
* So hand-code the branch offsets; fortunately, all PPC instructions are
|
468
|
+
* exactly 4 bytes each, so it's not too hard to count.
|
469
|
+
*/
|
470
|
+
static __inline__ int
|
471
|
+
tas(volatile slock_t *lock)
|
472
|
+
{
|
473
|
+
slock_t _t;
|
474
|
+
int _res;
|
475
|
+
|
476
|
+
__asm__ __volatile__(
|
477
|
+
#ifdef USE_PPC_LWARX_MUTEX_HINT
|
478
|
+
" lwarx %0,0,%3,1 \n"
|
479
|
+
#else
|
480
|
+
" lwarx %0,0,%3 \n"
|
481
|
+
#endif
|
482
|
+
" cmpwi %0,0 \n"
|
483
|
+
" bne $+16 \n" /* branch to li %1,1 */
|
484
|
+
" addi %0,%0,1 \n"
|
485
|
+
" stwcx. %0,0,%3 \n"
|
486
|
+
" beq $+12 \n" /* branch to lwsync/isync */
|
487
|
+
" li %1,1 \n"
|
488
|
+
" b $+12 \n" /* branch to end of asm sequence */
|
489
|
+
#ifdef USE_PPC_LWSYNC
|
490
|
+
" lwsync \n"
|
491
|
+
#else
|
492
|
+
" isync \n"
|
493
|
+
#endif
|
494
|
+
" li %1,0 \n"
|
495
|
+
|
496
|
+
: "=&b"(_t), "=r"(_res), "+m"(*lock)
|
497
|
+
: "r"(lock)
|
498
|
+
: "memory", "cc");
|
499
|
+
return _res;
|
500
|
+
}
|
501
|
+
|
502
|
+
/*
|
503
|
+
* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
|
504
|
+
* On newer machines, we can use lwsync instead for better performance.
|
505
|
+
*/
|
506
|
+
#ifdef USE_PPC_LWSYNC
|
507
|
+
#define S_UNLOCK(lock) \
|
508
|
+
do \
|
509
|
+
{ \
|
510
|
+
__asm__ __volatile__ (" lwsync \n" ::: "memory"); \
|
511
|
+
*((volatile slock_t *) (lock)) = 0; \
|
512
|
+
} while (0)
|
513
|
+
#else
|
514
|
+
#define S_UNLOCK(lock) \
|
515
|
+
do \
|
516
|
+
{ \
|
517
|
+
__asm__ __volatile__ (" sync \n" ::: "memory"); \
|
518
|
+
*((volatile slock_t *) (lock)) = 0; \
|
519
|
+
} while (0)
|
520
|
+
#endif /* USE_PPC_LWSYNC */
|
521
|
+
|
522
|
+
#endif /* powerpc */


/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register int rv;

    __asm__ __volatile__(
"   clrl    %0      \n"
"   tas     %1      \n"
"   sne     %0      \n"
:   "=d"(rv), "+m"(*lock)
:   /* no inputs */
:   "memory", "cc");
    return rv;
}

#endif /* (__mc68000__ || __m68k__) && __linux__ */


/* Motorola 88k */
#if defined(__m88k__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register slock_t _res = 1;

    __asm__ __volatile__(
"   xmem    %0, %2, %%r0    \n"
:   "+r"(_res), "+m"(*lock)
:   "r"(lock)
:   "memory");
    return (int) _res;
}

#endif /* __m88k__ */


/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register int _res;

    __asm__ __volatile__(
"   movl    $1, %0          \n"
"   bbssi   $0, (%2), 1f    \n"
"   clrl    %0              \n"
"1: \n"
:   "=&r"(_res), "+m"(*lock)
:   "r"(lock)
:   "memory");
    return _res;
}

#endif /* __vax__ */


#if defined(__mips__) && !defined(__sgi)    /* non-SGI MIPS */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/*
 * Original MIPS-I processors lacked the LL/SC instructions, but if we are
 * so unfortunate as to be running on one of those, we expect that the kernel
 * will handle the illegal-instruction traps and emulate them for us. On
 * anything newer (and really, MIPS-I is extinct) LL/SC is the only sane
 * choice because any other synchronization method must involve a kernel
 * call. Unfortunately, many toolchains still default to MIPS-I as the
 * codegen target; if the symbol __mips shows that that's the case, we
 * have to force the assembler to accept LL/SC.
 *
 * R10000 and up processors require a separate SYNC, which has the same
 * issues as LL/SC.
 */
#if __mips < 2
#define MIPS_SET_MIPS2  " .set mips2 \n"
#else
#define MIPS_SET_MIPS2
#endif

static __inline__ int
tas(volatile slock_t *lock)
{
    register volatile slock_t *_l = lock;
    register int _res;
    register int _tmp;

    __asm__ __volatile__(
"   .set push           \n"
    MIPS_SET_MIPS2
"   .set noreorder      \n"
"   .set nomacro        \n"
"   ll      %0, %2      \n"
"   or      %1, %0, 1   \n"
"   sc      %1, %2      \n"
"   xori    %1, 1       \n"
"   or      %0, %0, %1  \n"
"   sync                \n"
"   .set pop            "
:   "=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:   /* no inputs */
:   "memory");
    return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__( \
"   .set push           \n" \
    MIPS_SET_MIPS2 \
"   .set noreorder      \n" \
"   .set nomacro        \n" \
"   sync                \n" \
"   .set pop            " \
:   /* no outputs */ \
:   /* no inputs */ \
:   "memory"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)    /* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif /* __m32r__ */


#if defined(__sh__)             /* Renesas' SuperH */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    register int _res;

    /*
     * This asm is coded as if %0 could be any register, but actually SuperH
     * restricts the target of xor-immediate to be R0. That's handled by
     * the "z" constraint on _res.
     */
    __asm__ __volatile__(
"   tas.b @%2       \n"
"   movt  %0        \n"
"   xor   #1,%0     \n"
:   "=z"(_res), "+m"(*lock)
:   "r"(lock)
:   "memory", "t");
    return _res;
}

#endif /* __sh__ */


/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__) && !defined(__linux__)    /* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif

/*
 * Default implementation of S_UNLOCK() for gcc/icc.
 *
 * Note that this implementation is unsafe for any platform that can reorder
 * a memory access (either load or store) after a following store. That
 * happens not to be possible on x86 and most legacy architectures (some are
 * single-processor!), but many modern systems have weaker memory ordering.
 * Those that do must define their own version of S_UNLOCK() rather than
 * relying on this one.
 */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)  \
    do { __asm__ __volatile__("" : : : "memory");  *(lock) = 0; } while (0)
#endif

#endif  /* defined(__GNUC__) || defined(__INTEL_COMPILER) */


/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)  /* We didn't trigger above, let's try here */


#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX. Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct. The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
    int sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)   ((volatile int *) (((uintptr_t) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
    volatile int *lockword = TAS_ACTIVE_WORD(lock);
    register int lockval;

    __asm__ __volatile__(
"   ldcwx   0(0,%2),%0  \n"
:   "=r"(lockval), "+m"(*lockword)
:   "r"(lockword)
:   "memory");
    return (lockval == 0);
}

/*
 * The hppa implementation doesn't follow the rules of this file and provides
 * a gcc-specific implementation outside of the above defined(__GNUC__). It
 * does so to avoid duplication between the HP compiler and gcc. So undefine
 * the generic fallback S_UNLOCK from above.
 */
#ifdef S_UNLOCK
#undef S_UNLOCK
#endif
#define S_UNLOCK(lock)  \
    do { \
        __asm__ __volatile__("" : : : "memory"); \
        *TAS_ACTIVE_WORD(lock) = -1; \
    } while (0)

#endif /* __GNUC__ */

#define S_INIT_LOCK(lock) \
    do { \
        volatile slock_t *lock_ = (lock); \
        lock_->sema[0] = -1; \
        lock_->sema[1] = -1; \
        lock_->sema[2] = -1; \
        lock_->sema[3] = -1; \
    } while (0)

#define S_LOCK_FREE(lock)   (*TAS_ACTIVE_WORD(lock) != 0)

#endif /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
 * HP-UX on Itanium, non-gcc/icc compiler
 *
 * We assume that the compiler enforces strict ordering of loads/stores on
 * volatile data (see comments on the gcc-version earlier in this file).
 * Note that this assumption does *not* hold if you use the
 * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
 *
 * See also Implementing Spinlocks on the Intel Itanium Architecture and
 * PA-RISC, by Tor Ekqvist and David Graves, for more information. As of
 * this writing, version 1.0 of the manual is available at:
 * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)  (*(lock) ? 1 : TAS(lock))
#define S_UNLOCK(lock)  \
    do { _Asm_mf(); (*(lock)) = 0; } while (0)

#endif  /* HPUX on IA64, non gcc/icc */

#if defined(_AIX)   /* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)       _check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)  _clear_lock((slock_t *) (lock), 0)
#endif  /* _AIX */


/* These are in sunstudio_(sparc|x86).s */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
                             slock_t cmp);

#define TAS(a)  (pg_atomic_cas((a), 1, 0) != 0)
#endif
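The Sun Studio port above builds TAS() from an out-of-line compare-and-swap: pg_atomic_cas(lock, with, cmp) is expected to store `with` only if *lock currently equals `cmp` and to return the previous value, so a non-zero result means the lock was already held. The helper below restates that mapping with a generic GCC builtin purely for illustration; it is not part of this header or of the sunstudio_*.s files.

/* Illustration only -- hypothetical helper, not part of s_lock.h. */
static int
tas_via_cas_sketch(volatile unsigned int *lock)
{
    /* nonzero (failure) when the old value was already nonzero, i.e. held */
    return __sync_val_compare_and_swap(lock, 0u, 1u) != 0;
}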


#ifdef _MSC_VER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
    _mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
    /* See comment for gcc code. Same code, MASM syntax */
    __asm rep nop;
}
#endif

#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)

#define S_UNLOCK(lock)  \
    do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0)

#endif


#endif  /* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform. To continue the compilation, rerun configure using --disable-spinlocks. However, performance will be poor. Please report this to pgsql-bugs@lists.postgresql.org.
#endif


#else   /* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must! The subroutines appear in spin.c.
 */
typedef int slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock, bool nested);
extern int  tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)   s_lock_free_sema(lock)
#define S_UNLOCK(lock)      s_unlock_sema(lock)
#define S_INIT_LOCK(lock)   s_init_lock_sema(lock, false)
#define TAS(lock)           tas_sema(lock)


#endif  /* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
    (TAS(lock) ? s_lock((lock), __FILE__, __LINE__, PG_FUNCNAME_MACRO) : 0)
#endif  /* S_LOCK */
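Taken together, S_INIT_LOCK(), S_LOCK() and S_UNLOCK() form the spinlock API this header exports; backend code normally reaches them through the SpinLockInit/SpinLockAcquire/SpinLockRelease wrappers in storage/spin.h rather than invoking them directly. The sketch below shows a minimal critical section built on these macros; it is an illustration only, not part of the header, and the struct and function names are hypothetical.

/* Illustration only -- hypothetical example, not part of s_lock.h. */
typedef struct
{
    slock_t     mutex;          /* initialize with S_INIT_LOCK */
    int         counter;        /* protected by mutex */
} SharedCounterSketch;

static void
shared_counter_bump_sketch(volatile SharedCounterSketch *sc)
{
    S_LOCK(&sc->mutex);         /* spins via s_lock() if the first TAS() fails */
    sc->counter++;              /* keep the critical section very short */
    S_UNLOCK(&sc->mutex);       /* release, including the required barrier */
}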

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)   (*(lock) == 0)
#endif  /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
/*
 * Our default implementation of S_UNLOCK is essentially *(lock) = 0. This
 * is unsafe if the platform can reorder a memory access (either load or
 * store) after a following store; platforms where this is possible must
 * define their own S_UNLOCK. But CPU reordering is not the only concern:
 * if we simply defined S_UNLOCK() as an inline macro, the compiler might
 * reorder instructions from inside the critical section to occur after the
 * lock release. Since the compiler probably can't know what the external
 * function s_unlock is doing, putting the same logic there should be adequate.
 * A sufficiently-smart globally optimizing compiler could break that
 * assumption, though, and the cost of a function call for every spinlock
 * release may hurt performance significantly, so we use this implementation
 * only for platforms where we don't know of a suitable intrinsic. For the
 * most part, those are relatively obscure platform/compiler combinations to
 * which the PostgreSQL project does not have access.
 */
#define USE_DEFAULT_S_UNLOCK
extern void s_unlock(volatile slock_t *lock);
#define S_UNLOCK(lock)      s_unlock(lock)
#endif  /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)   S_UNLOCK(lock)
#endif  /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()    ((void) 0)
#endif  /* SPIN_DELAY */

#if !defined(TAS)
extern int tas(volatile slock_t *lock);     /* in port/.../tas.s, or
                                             * s_lock.c */

#define TAS(lock)       tas(lock)
#endif  /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)  TAS(lock)
#endif  /* TAS_SPIN */

extern slock_t dummy_spinlock;

/*
 * Platform-independent out-of-line support routines
 */
extern int s_lock(volatile slock_t *lock, const char *file, int line, const char *func);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int  update_spins_per_delay(int shared_spins_per_delay);

/*
 * Support for spin delay which is useful in various places where
 * spinlock-like procedures take place.
 */
typedef struct
{
    int         spins;
    int         delays;
    int         cur_delay;
    const char *file;
    int         line;
    const char *func;
} SpinDelayStatus;

static inline void
init_spin_delay(SpinDelayStatus *status,
                const char *file, int line, const char *func)
{
    status->spins = 0;
    status->delays = 0;
    status->cur_delay = 0;
    status->file = file;
    status->line = line;
    status->func = func;
}

#define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, PG_FUNCNAME_MACRO)
void perform_spin_delay(SpinDelayStatus *status);
void finish_spin_delay(SpinDelayStatus *status);
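perform_spin_delay() and finish_spin_delay() provide the backoff policy used while waiting on spinlock-like conditions: keep spinning with SPIN_DELAY() at first, fall back to sleeping once the spin budget is exhausted, and feed the observed contention back into spins_per_delay. The usual calling pattern looks roughly like the sketch below; it is an illustration only, not part of the header, and the polled flag is hypothetical.

/* Illustration only -- not part of s_lock.h. */
static void
wait_for_flag_sketch(volatile int *flag)
{
    SpinDelayStatus delayStatus;

    init_local_spin_delay(&delayStatus);    /* records file/line/function */

    while (!*flag)
        perform_spin_delay(&delayStatus);   /* spin first, then sleep */

    finish_spin_delay(&delayStatus);        /* adjusts spins_per_delay */
}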

#endif  /* S_LOCK_H */