@decaf-ts/for-couchdb 0.0.2 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. package/dist/esm/for-couchdb.bundle.min.esm.js +1 -1
  2. package/dist/esm/for-couchdb.bundle.min.esm.js.LICENSE.txt +0 -11
  3. package/dist/for-couchdb.bundle.min.js +1 -1
  4. package/dist/for-couchdb.bundle.min.js.LICENSE.txt +0 -11
  5. package/docker/.dockerignore +5 -0
  6. package/docker/.env +7 -0
  7. package/docker/Dockerfile-booter +19 -0
  8. package/docker/Dockerfile-couchdb +4 -0
  9. package/docker/couchdb.ini +886 -0
  10. package/docker/docker-compose.yml +109 -0
  11. package/docker/init-cluster.sh +66 -0
  12. package/lib/adapter.cjs +1 -88
  13. package/lib/adapter.d.ts +8 -12
  14. package/lib/esm/adapter.d.ts +8 -12
  15. package/lib/esm/adapter.js +1 -88
  16. package/lib/esm/index.d.ts +4 -1
  17. package/lib/esm/index.js +5 -2
  18. package/lib/esm/indexes/generator.d.ts +1 -1
  19. package/lib/esm/indexes/generator.js +1 -1
  20. package/lib/esm/indexes/index.d.ts +0 -1
  21. package/lib/esm/indexes/index.js +1 -2
  22. package/lib/esm/interfaces/CouchDBRepository.d.ts +1 -1
  23. package/lib/esm/interfaces/CouchDBRepository.js +1 -1
  24. package/lib/esm/query/FromClause.d.ts +1 -1
  25. package/lib/esm/query/FromClause.js +1 -1
  26. package/lib/esm/query/InsertClause.d.ts +1 -1
  27. package/lib/esm/query/InsertClause.js +1 -1
  28. package/lib/esm/query/Paginator.d.ts +1 -1
  29. package/lib/esm/query/Paginator.js +1 -1
  30. package/lib/esm/query/SelectClause.d.ts +1 -1
  31. package/lib/esm/query/SelectClause.js +1 -1
  32. package/lib/esm/query/Statement.d.ts +3 -3
  33. package/lib/esm/query/Statement.js +1 -1
  34. package/lib/esm/query/ValuesClause.d.ts +1 -1
  35. package/lib/esm/query/ValuesClause.js +1 -1
  36. package/lib/esm/query/WhereClause.d.ts +1 -1
  37. package/lib/esm/query/WhereClause.js +1 -1
  38. package/lib/esm/query/constants.d.ts +1 -1
  39. package/lib/esm/query/constants.js +1 -1
  40. package/lib/esm/query/factory.d.ts +1 -1
  41. package/lib/esm/query/factory.js +1 -1
  42. package/lib/esm/query/translate.d.ts +1 -1
  43. package/lib/esm/query/translate.js +1 -1
  44. package/lib/esm/types.d.ts +358 -0
  45. package/lib/esm/types.js +3 -0
  46. package/lib/esm/utils.d.ts +3 -3
  47. package/lib/esm/utils.js +1 -1
  48. package/lib/index.cjs +5 -2
  49. package/lib/index.d.ts +4 -1
  50. package/lib/indexes/generator.cjs +1 -1
  51. package/lib/indexes/generator.d.ts +1 -1
  52. package/lib/indexes/index.cjs +1 -2
  53. package/lib/indexes/index.d.ts +0 -1
  54. package/lib/interfaces/CouchDBRepository.cjs +1 -1
  55. package/lib/interfaces/CouchDBRepository.d.ts +1 -1
  56. package/lib/query/FromClause.cjs +1 -1
  57. package/lib/query/FromClause.d.ts +1 -1
  58. package/lib/query/InsertClause.cjs +1 -1
  59. package/lib/query/InsertClause.d.ts +1 -1
  60. package/lib/query/Paginator.cjs +1 -1
  61. package/lib/query/Paginator.d.ts +1 -1
  62. package/lib/query/SelectClause.cjs +1 -1
  63. package/lib/query/SelectClause.d.ts +1 -1
  64. package/lib/query/Statement.cjs +1 -1
  65. package/lib/query/Statement.d.ts +3 -3
  66. package/lib/query/ValuesClause.cjs +1 -1
  67. package/lib/query/ValuesClause.d.ts +1 -1
  68. package/lib/query/WhereClause.cjs +1 -1
  69. package/lib/query/WhereClause.d.ts +1 -1
  70. package/lib/query/constants.cjs +1 -1
  71. package/lib/query/constants.d.ts +1 -1
  72. package/lib/query/factory.cjs +1 -1
  73. package/lib/query/factory.d.ts +1 -1
  74. package/lib/query/translate.cjs +1 -1
  75. package/lib/query/translate.d.ts +1 -1
  76. package/lib/types.cjs +4 -0
  77. package/lib/types.d.ts +358 -0
  78. package/lib/utils.cjs +1 -1
  79. package/lib/utils.d.ts +3 -3
  80. package/package.json +10 -16
  81. package/lib/esm/indexes/types.d.ts +0 -11
  82. package/lib/esm/indexes/types.js +0 -3
  83. package/lib/indexes/types.cjs +0 -4
  84. package/lib/indexes/types.d.ts +0 -11
@@ -0,0 +1,886 @@
1
+ ; Upgrading CouchDB will overwrite this file.
2
+ [vendor]
3
+ name = The Apache Software Foundation
4
+
5
+ [couchdb]
6
+ uuid =
7
+ database_dir = ./data
8
+ view_index_dir = ./data
9
+ ;util_driver_dir =
10
+ ;plugin_dir =
11
+ ;os_process_timeout = 5000 ; 5 seconds. for view servers.
12
+
13
+ ; Maximum number of .couch files to open at once.
14
+ ; The actual limit may be slightly lower depending on how
15
+ ; many schedulers you have as the allowance is divided evenly
16
+ ; among them.
17
+ ;max_dbs_open = 500
18
+
19
+ ; Method used to compress everything that is appended to database and view index files, except
20
+ ; for attachments (see the attachments section). Available methods are:
21
+ ;
22
+ ; none - no compression
23
+ ; snappy - use google snappy, a very fast compressor/decompressor
24
+ ; deflate_N - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
25
+ ; lowest compression ratio) to 9 (slowest, highest compression ratio)
26
+ ;file_compression = snappy
27
+
28
+ ; Higher values may give better read performance due to less read operations
29
+ ; and/or more OS page cache hits, but they can also increase overall response
30
+ ; time for writes when there are many attachment write requests in parallel.
31
+ ;attachment_stream_buffer_size = 4096
32
+
33
+ ; Default security object for databases if not explicitly set
34
+ ; everyone - same as couchdb 1.0, everyone can read/write
35
+ ; admin_only - only admins can read/write
36
+ ; admin_local - sharded dbs on :5984 are read/write for everyone,
37
+ ; local dbs on :5986 are read/write for admins only
38
+ ;default_security = admin_only
39
+
40
+ ;btree_chunk_size = 1279
41
+ ;maintenance_mode = false
42
+ ;stem_interactive_updates = true
43
+ ;uri_file =
44
+
45
+ ; The speed of processing the _changes feed with doc_ids filter can be
46
+ ; influenced directly with this setting - increase for faster processing at the
47
+ ; expense of more memory usage.
48
+ ;changes_doc_ids_optimization_threshold = 100
49
+
50
+ ; Maximum document ID length. Can be set to an integer or 'infinity'.
51
+ ;max_document_id_length = infinity
52
+
53
+ ; Limit maximum document size. Requests to create / update documents with a body
54
+ ; size larger than this will fail with a 413 http error. This limit applies to
55
+ ; requests which update a single document as well as individual documents from
56
+ ; a _bulk_docs request. The size limit is approximate due to the nature of JSON
57
+ ; encoding.
58
+ ;max_document_size = 8000000 ; bytes
59
+
60
+ ; Maximum attachment size.
61
+ ;max_attachment_size = 1073741824 ; 1 gibibyte
62
+
63
+ ; Do not update the least recently used DB cache on reads, only writes
64
+ ;update_lru_on_read = false
65
+
66
+ ; The default storage engine to use when creating databases
67
+ ; is set as a key into the [couchdb_engines] section.
68
+ ;default_engine = couch
69
+
70
+ ; Enable this to only "soft-delete" databases when DELETE /{db} requests are
71
+ ; made. This will place a .recovery directory in your data directory and
72
+ ; move deleted databases/shards there instead. You can then manually delete
73
+ ; these files later, as desired.
74
+ ;enable_database_recovery = false
75
+
76
+ ; Set the maximum size allowed for a partition. This helps users avoid
77
+ ; inadvertently abusing partitions resulting in hot shards. The default
78
+ ; is 10GiB. A value of 0 or less will disable partition size checks.
79
+ ;max_partition_size = 10737418240
80
+
81
+ ; When true, system databases _users and _replicator are created immediately
82
+ ; on startup if not present.
83
+ ;single_node = false
84
+
85
+ ; Allow edits on the _security object in the user db. By default, it's disabled.
86
+ ;users_db_security_editable = false
87
+
88
+ ; Sets the maximum time that the coordinator node will wait for cluster members
89
+ ; to request attachment data before returning a response to the client.
90
+ ;attachment_writer_timeout = 300000
91
+
92
+ ; Sets the log level for informational compaction related entries.
93
+ ;compaction_log_level = info
94
+
95
+ [purge]
96
+ ; Allowed maximum number of documents in one purge request
97
+ ;max_document_id_number = 100
98
+
99
+ ; Allowed maximum number of accumulated revisions in one purge request
100
+ ;max_revisions_number = 1000
101
+
102
+ ; Allowed durations when index is not updated for local purge checkpoint
103
+ ; document. Default is 24 hours.
104
+ ;index_lag_warn_seconds = 86400
105
+
106
+ [couchdb_engines]
107
+ ; The keys in this section are the filename extension that
108
+ ; the specified engine module will use. This is important so
109
+ ; that couch_server is able to find an existing database without
110
+ ; having to ask every configured engine.
111
+ couch = couch_bt_engine
112
+
113
+ [process_priority]
114
+ ; Selectively disable altering process priorities for modules that request it.
115
+ ; * NOTE: couch_server priority has been shown to lead to CouchDB hangs and
116
+ ; failures on Erlang releases 21.0 - 21.3.8.12 and 22.0 -> 22.2.4. Do not
117
+ ; enable when running with those versions.
118
+ ;couch_server = false
119
+
120
+ [cluster]
121
+ ;q=2
122
+ ;n=3
123
+ ;placement = metro-dc-a:2,metro-dc-b:1
124
+
125
+ ; Supply a comma-delimited list of node names that this node should
126
+ ; contact in order to join a cluster. If a seedlist is configured the ``_up``
127
+ ; endpoint will return a 404 until the node has successfully contacted at
128
+ ; least one of the members of the seedlist and replicated an up-to-date copy
129
+ ; of the ``_nodes``, ``_dbs``, and ``_users`` system databases.
130
+ ;seedlist = couchdb@node1.example.com,couchdb@node2.example.com
131
+
132
+ ; Period in seconds specifying how often to attempt reconnecting to
133
+ ; disconnected nodes. There is a 25% random jitter applied to this
134
+ ; value.
135
+ ;reconnect_interval_sec = 37
136
+
137
+ [chttpd]
138
+ ; These settings affect the main, clustered port (5984 by default).
139
+ port = 5984
140
+ bind_address = 127.0.0.1
141
+ ;backlog = 512
142
+ ;socket_options = [{sndbuf, 262144}, {nodelay, true}]
143
+ ;server_options = [{recbuf, undefined}]
144
+ ;require_valid_user = false
145
+ ;require_valid_user_except_for_up = false
146
+
147
+ ; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
148
+ ; If Server header is left out, Mochiweb will add its own one in.
149
+ ;prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
150
+
151
+ ; Limit maximum number of databases when trying to get detailed information using
152
+ ; _dbs_info in a request
153
+ ;max_db_number_for_dbs_info_req = 100
154
+
155
+ ; set to true to delay the start of a response until the end has been calculated
156
+ ;buffer_response = false
157
+
158
+ ; authentication handlers
159
+ ;authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
160
+ ; uncomment the next line to enable proxy authentication
161
+ ;authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
162
+ ; uncomment the next line to enable JWT authentication
163
+ ;authentication_handlers = {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
164
+
165
+ ; prevent non-admins from accessing /_all_dbs and /_dbs_info
166
+ ;admin_only_all_dbs = true
167
+
168
+ ; These options are moved from [httpd]
169
+ ;secure_rewrites = true
170
+ ;allow_jsonp = false
171
+
172
+ enable_cors = true
173
+ ;enable_xframe_options = false
174
+
175
+ ; CouchDB can optionally enforce a maximum uri length;
176
+ ;max_uri_length = 8000
177
+
178
+ ;changes_timeout = 60000
179
+ ;config_whitelist =
180
+ ;rewrite_limit = 100
181
+ ;x_forwarded_host = X-Forwarded-Host
182
+ ;x_forwarded_proto = X-Forwarded-Proto
183
+ ;x_forwarded_ssl = X-Forwarded-Ssl
184
+
185
+ ; Maximum allowed http request size. Applies to both clustered and local port.
186
+ ;max_http_request_size = 4294967296 ; 4GB
187
+
188
+ ; Set to true to decode + to space in db and doc_id parts.
189
+ ;decode_plus_to_space = true
190
+
191
+ ; Set to false to revert to a previous _bulk_get implementation using single
192
+ ; doc fetches internally. Using batches should be faster, however there may be
193
+ ; bugs in the new implementation, so expose this option to allow reverting to
194
+ ; the old behavior.
195
+ ;bulk_get_use_batches = true
196
+
197
+ ; How often to check for client disconnects while processing streaming
198
+ ; requests such as _all_docs, _find, _changes and views
199
+ ;disconnect_check_msec = 30000
200
+
201
+ ; The amount of jitter to apply to the disconnect_check_msec. That's to avoid a
202
+ ; stampede in case when there are lot of concurrent clients connecting.
203
+ ;disconnect_check_jitter_msec = 15000
204
+
205
+ ; Scrub auth and cookie headers from external json request objects.
206
+ ; Set to false to avoid scrubbing and revert to the previous behavior.
207
+ ;scrub_json_request = true
208
+
209
+ ;[jwt_auth]
210
+ ; List of claims to validate
211
+ ; can be the name of a claim like "exp" or a tuple if the claim requires
212
+ ; a parameter
213
+ ;required_claims = exp, {iss, "IssuerNameHere"}
214
+
215
+ ; roles_claim_name is marked as deprecated. Please use roles_claim_path instead!
216
+ ; Values for ``roles_claim_name`` can only be top-level attributes in the JWT
217
+ ; token. If ``roles_claim_path`` is set, then ``roles_claim_name`` is ignored!
218
+ ;roles_claim_name = my-couchdb-roles
219
+
220
+ ; roles_claim_path was introduced to overcome disadvantages of ``roles_claim_name``,
221
+ ; because it is not possible with ``roles_claim_name`` to map nested role
222
+ ; attributes in the JWT token. There are only two characters with a special meaning.
223
+ ; These are
224
+ ; - ``.`` for nesting json attributes and
225
+ ; - ``\.`` to skip nesting
226
+ ; Example JWT data-payload:
227
+ ; {
228
+ ; "my": {
229
+ ; "nested": {
230
+ ; "_couchdb.roles": [
231
+ ; ...
232
+ ; ]
233
+ ; }
234
+ ; }
235
+ ; }
236
+ ; would result in the following parameter config:
237
+ ;roles_claim_path = my.nested._couchdb\.roles
238
+
239
+ ;[jwt_keys]
240
+ ; Configure at least one key here if using the JWT auth handler.
241
+ ; If your JWT tokens do not include a "kid" attribute, use "_default"
242
+ ; as the config key, otherwise use the kid as the config key.
243
+ ; Examples:
244
+ ; hmac:_default = aGVsbG8=
245
+ ; hmac:foo = aGVsbG8=
246
+ ; The config values can represent symmetric and asymmetric keys.
247
+ ; For symmetric keys, the value is base64 encoded;
248
+ ; hmac:_default = aGVsbG8= # base64-encoded form of "hello"
249
+ ; For asymmetric keys, the value is the PEM encoding of the public
250
+ ; key with newlines replaced with the escape sequence \n.
251
+ ; rsa:foo = -----BEGIN PUBLIC KEY-----\nMIIBIjAN...IDAQAB\n-----END PUBLIC KEY-----\n
252
+ ; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
253
+ ; Since version 3.3 it's possible for keys to contain "=" characters when the
254
+ ; config setting is in the "key = value" format. In other words, there must be a space
255
+ ; between the key and the equals sign, and another space between the equal sign
256
+ ; and the value. For example, it should look like this:
257
+ ; rsa:h213h2h1jg3hj2= = <somevalue>
258
+ ; and *not* like this:
259
+ ; rsa:h213h2h1jg3hj2==<somevalue>
260
+
261
+ [couch_peruser]
262
+ ; If enabled, couch_peruser ensures that a private per-user database
263
+ ; exists for each document in _users. These databases are writable only
264
+ ; by the corresponding user. Databases are in the following form:
265
+ ; userdb-{hex encoded username}
266
+ ;enable = false
267
+
268
+ ; If set to true and a user is deleted, the respective database gets
269
+ ; deleted as well.
270
+ ;delete_dbs = false
271
+
272
+ ; Set a default q value for peruser-created databases that is different from
273
+ ; cluster / q
274
+ ;q = 1
275
+
276
+ ; prefix for user databases. If you change this after user dbs have been
277
+ ; created, the existing databases won't get deleted if the associated user
278
+ ; gets deleted because of the then prefix mismatch.
279
+ ;database_prefix = userdb-
280
+
281
+ [httpd]
282
+ port = 5986
283
+ bind_address = 127.0.0.1
284
+ ;authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
285
+
286
+ ; Options for the MochiWeb HTTP server.
287
+ ;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
288
+
289
+ ; For more socket options, consult Erlang's module 'inet' man page.
290
+ ;socket_options = [{recbuf, undefined}, {sndbuf, 262144}, {nodelay, true}]
291
+ ;socket_options = [{sndbuf, 262144}]
292
+
293
+ ; These settings were moved to [chttpd]
294
+ ; secure_rewrites, allow_jsonp, enable_cors, enable_xframe_options,
295
+ ; max_uri_length, changes_timeout, config_whitelist, rewrite_limit,
296
+ ; x_forwarded_host, x_forwarded_proto, x_forwarded_ssl, max_http_request_size
297
+
298
+ ;[httpd_design_handlers]
299
+ ;_view =
300
+
301
+ ;[ioq]
302
+ ;concurrency = 10
303
+ ;ratio = 0.01
304
+
305
+ [ssl]
306
+ ;port = 6984
307
+
308
+ [chttpd_auth]
309
+ ;authentication_db = _users
310
+
311
+ ; These options are moved from [couch_httpd_auth]
312
+ ;authentication_redirect = /_utils/session.html
313
+ ;timeout = 600 ; number of seconds before automatic logout
314
+ ;auth_cache_size = 50 ; size is number of cache entries
315
+ ;allow_persistent_cookies = true ; set to false to disallow persistent cookies
316
+ ;iterations = 10 ; iterations for password hashing
317
+ ;min_iterations = 1
318
+ ;max_iterations = 1000000000
319
+ ;password_scheme = pbkdf2
320
+
321
+ ; List of Erlang RegExp or tuples of RegExp and an optional error message.
322
+ ; Where a new password must match all RegExp.
323
+ ; Example: [{".{10,}", "Password min length is 10 characters."}, "\\d+"]
324
+ ;password_regexp = []
325
+ ;proxy_use_secret = false
326
+
327
+ ; comma-separated list of public fields, 404 if empty
328
+ ;public_fields =
329
+ ;secret =
330
+ ;users_db_public = false
331
+ ;cookie_domain = example.com
332
+
333
+ ; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set.
334
+ ;same_site =
335
+
336
+ ; Set the HMAC algorithm used by cookie authentication
337
+ ; Possible values: sha,sha224,sha256,sha384,sha512,sha3_224,sha3_256,sha3_384,sha3_512,
338
+ ; blake2b,blake2s,md4,md5,ripemd160
339
+ ; New cookie sessions are generated with the first hash algorithm.
340
+ ; All values can be used to decode the session.
341
+ hash_algorithms = sha256, sha
342
+
343
+ ;[chttpd_auth_cache]
344
+ ;max_lifetime = 600000
345
+ ;max_objects =
346
+ ;max_size = 104857600
347
+
348
+ ;[mem3]
349
+ ;nodes_db = _nodes
350
+ ;shard_cache_size = 25000
351
+ ;shards_db = _dbs
352
+ ;sync_concurrency = 10
353
+
354
+ ; When enabled, internal replicator will replicate purge requests between shard
355
+ ; copies. It may be helpful to disable it temporarily when doing rolling node
356
+ ; upgrades from CouchDB versions before 2.3.0 when clustered purge feature was
357
+ ; introduced
358
+ ;replicate_purges = true
359
+
360
+ ;[fabric]
361
+ ;all_docs_concurrency = 10
362
+ ;changes_duration =
363
+ ;shard_timeout_factor = 2
364
+ ;shard_timeout_min_msec = 100
365
+ ;uuid_prefix_len = 7
366
+ ;request_timeout = 60000
367
+ ;all_docs_timeout = 10000
368
+ ;attachments_timeout = 60000
369
+ ;view_timeout = 3600000
370
+ ;partition_view_timeout = 3600000
371
+
372
+ ;[rexi]
373
+ ;buffer_count = 2000
374
+ ;server_per_node = true
375
+ ;stream_limit = 5
376
+
377
+ ; Use a single message to kill a group of remote workers. This feature is
378
+ ; available starting with 3.0. When performing a rolling upgrade from 2.x to
379
+ ; 3.x, set this value to false, then after all nodes were upgraded delete it so
380
+ ; it can use the default true value.
381
+ ;use_kill_all = true
382
+
383
+ ;[global_changes]
384
+ ;max_event_delay = 25
385
+ ;max_write_delay = 500
386
+ ;update_db = true
387
+
388
+ ;[view_updater]
389
+ ;min_writer_items = 100
390
+ ;min_writer_size = 16777216
391
+
392
+ [couch_httpd_auth]
393
+ ; WARNING! This only affects the node-local port (5986 by default).
394
+ ; You probably want the settings under [chttpd].
395
+ authentication_db = _users
396
+
397
+ ; These settings were moved to [chttpd_auth]
398
+ ; authentication_redirect, timeout,
399
+ ; auth_cache_size, allow_persistent_cookies, iterations, min_iterations,
400
+ ; max_iterations, password_scheme, password_regexp, proxy_use_secret,
401
+ ; public_fields, secret, users_db_public, cookie_domain, same_site
402
+
403
+ ; CSP (Content Security Policy) Support
404
+ [csp]
405
+ ;utils_enable = true
406
+ ;utils_header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
407
+ ;attachments_enable = true
408
+ ;attachments_header_value = sandbox
409
+ ;showlist_enable = true
410
+ ;showlist_header_value = sandbox
411
+
412
+ [cors]
413
+ credentials = true
414
+ ; List of origins separated by a comma, * means accept all
415
+ ; Origins must include the scheme: http://example.com
416
+ ; You can't set origins: * and credentials = true at the same time.
417
+ origins = *
418
+
419
+ ; List of accepted headers separated by a comma
420
+ headers = accept, authorization, content-type, origin, referer, x-csrf-token
421
+
422
+ ; List of accepted methods
423
+ methods = GET, PUT, POST, HEAD, DELETE
424
+
425
+ ; Configuration for a vhost
426
+ ;[cors:http://example.com]
427
+ ;credentials = false
428
+ ; List of origins separated by a comma
429
+ ; Origins must include the scheme: http://example.com
430
+ ; You can't set origins: * and credentials = true at the same time.
431
+ ;origins =
432
+
433
+ ; List of accepted headers separated by a comma
434
+ ;headers =
435
+
436
+ ; List of accepted methods
437
+ ;methods =
438
+
439
+ ; Configuration for the design document cache
440
+ ;[ddoc_cache]
441
+ ; The maximum size of the cache in bytes
442
+ ;max_size = 104857600 ; 100MiB
443
+
444
+ ; The period each cache entry should wait before
445
+ ; automatically refreshing in milliseconds
446
+ ;refresh_timeout = 67000
447
+
448
+ [x_frame_options]
449
+ ; Settings same-origin will return X-Frame-Options: SAMEORIGIN.
450
+ ; If same origin is set, it will ignore the hosts setting
451
+ ;same_origin = true
452
+
453
+ ; Settings hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
454
+ ; List of hosts separated by a comma. * means accept all
455
+ ;hosts =
456
+
457
+ [native_query_servers]
458
+ ; Erlang Query Server
459
+ ;enable_erlang_query_server = false
460
+
461
+ ; Changing reduce_limit to false will disable reduce_limit.
462
+ ; If you think you're hitting reduce_limit with a "good" reduce function,
463
+ ; please let us know on the mailing list so we can fine tune the heuristic.
464
+ [query_server_config]
465
+ ;commit_freq = 5
466
+ ;reduce_limit = true
467
+ ;os_process_limit = 100
468
+ ;os_process_idle_limit = 300
469
+ ;os_process_soft_limit = 100
470
+
471
+ ; Timeout for how long a response from a busy view group server can take.
472
+ ; "infinity" is also a valid configuration value.
473
+ ;group_info_timeout = 5000
474
+ ;query_limit = 268435456
475
+ ;partition_query_limit = 268435456
476
+
477
+ ; Configure what to use as the db tag when selecting design doc couchjs
478
+ ; processes. The choices are:
479
+ ; - name (default) : Use the entire db name
480
+ ; - prefix : Use only db prefix before the first "/" character
481
+ ; - none : Do not use a db tag at all
482
+ ;db_tag = name
483
+
484
+ [mango]
485
+ ; Set to true to disable the "index all fields" text index, which can lead
486
+ ; to out of memory issues when users have documents with nested array fields.
487
+ ;index_all_disabled = false
488
+
489
+ ; Default limit value for mango _find queries.
490
+ ;default_limit = 25
491
+
492
+ ; Ratio between documents scanned and results matched that will
493
+ ; generate a warning in the _find response. Setting this to 0 disables
494
+ ; the warning.
495
+ ;index_scan_warning_threshold = 10
496
+
497
+ [indexers]
498
+ couch_mrview = true
499
+
500
+ [feature_flags]
501
+ ; This enables any database to be created as a partitioned databases (except system db's).
502
+ ; Setting this to false will stop the creation of partitioned databases.
503
+ ; partitioned||allowed* = true will scope the creation of partitioned databases
504
+ ; to databases with 'allowed' prefix.
505
+ partitioned||* = true
506
+
507
+ [uuids]
508
+ ; Known algorithms:
509
+ ; random - 128 bits of random awesome
510
+ ; All awesome, all the time.
511
+ ; sequential - monotonically increasing ids with random increments
512
+ ; First 26 hex characters are random. Last 6 increment in
513
+ ; random amounts until an overflow occurs. On overflow, the
514
+ ; random prefix is regenerated and the process starts over.
515
+ ; utc_random - Time since Jan 1, 1970 UTC with microseconds
516
+ ; First 14 characters are the time in hex. Last 18 are random.
517
+ ; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
518
+ ; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
519
+ ;algorithm = sequential
520
+
521
+ ; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
522
+ ; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
523
+ ;utc_id_suffix =
524
+
525
+ ; Maximum number of UUIDs retrievable from /_uuids in a single request
526
+ ;max_count = 1000
527
+
528
+ [attachments]
529
+ ;compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
530
+ ;compressible_types = text/*, application/javascript, application/json, application/xml
531
+
532
+ [replicator]
533
+ ; Random jitter applied on replication job startup (milliseconds)
534
+ ;startup_jitter = 5000
535
+
536
+ ; Number of actively running replications
537
+ ;max_jobs = 500
538
+
539
+ ;Scheduling interval in milliseconds. During each reschedule cycle
540
+ ;interval = 60000
541
+
542
+ ; Maximum number of replications to start and stop during rescheduling.
543
+ ;max_churn = 20
544
+
545
+ ; More worker processes can give higher network throughput but can also
546
+ ; imply more disk and network IO.
547
+ ;worker_processes = 4
548
+
549
+ ; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
550
+ ; also reduce the total amount of used RAM memory.
551
+ ;worker_batch_size = 500
552
+
553
+ ; Maximum number of HTTP connections per replication.
554
+ ;http_connections = 20
555
+
556
+ ; HTTP connection timeout per replication.
557
+ ; Even for very fast/reliable networks it might need to be increased if a remote
558
+ ; database is too busy.
559
+ ;connection_timeout = 30000
560
+
561
+ ; Request timeout
562
+ ;request_timeout = infinity
563
+
564
+ ; If a request fails, the replicator will retry it up to N times.
565
+ ;retries_per_request = 5
566
+
567
+ ; Use checkpoints
568
+ ;use_checkpoints = true
569
+
570
+ ; Attempt to use bulk_get for fetching documents from the source
571
+ ;use_bulk_get = true
572
+
573
+ ; Checkpoint interval
574
+ ;checkpoint_interval = 30000
575
+
576
+ ; Some socket options that might boost performance in some scenarios:
577
+ ; {nodelay, boolean()}
578
+ ; {sndbuf, integer()}
579
+ ; {recbuf, integer()}
580
+ ; {priority, integer()}
581
+ ; See the `inet` Erlang module's man page for the full list of options.
582
+ ;socket_options = [{keepalive, true}, {nodelay, false}]
583
+
584
+ ; Valid socket options. Options not in this list are ignored. The full list of
585
+ ; options may be found at https://www.erlang.org/doc/man/inet.html#setopts-2.
586
+ ;valid_socket_options = buffer,keepalive,nodelay,priority,recbuf,sndbuf
587
+
588
+ ; Valid replication endpoint protocols. Replication jobs with endpoint urls not
589
+ ; in this list will fail to run.
590
+ ;valid_endpoint_protocols = http,https
591
+
592
+ ; Valid replication proxy protocols. Replication jobs with proxy urls not in
593
+ ; this list will fail to run.
594
+ ;valid_proxy_protocols = http,https,socks5
595
+
596
+ ; Path to a file containing the user's certificate.
597
+ ;cert_file = /full/path/to/server_cert.pem
598
+
599
+ ; Path to file containing user's private PEM encoded key.
600
+ ;key_file = /full/path/to/server_key.pem
601
+
602
+ ; String containing the user's password. Only used if the private keyfile is password protected.
603
+ ;password = somepassword
604
+
605
+ ; Set to true to validate peer certificates.
606
+ ;verify_ssl_certificates = false
607
+
608
+ ; File containing a list of peer trusted certificates (in the PEM format).
609
+ ;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
610
+
611
+ ; Maximum peer certificate depth (must be set even if certificate validation is off).
612
+ ;ssl_certificate_max_depth = 3
613
+
614
+ ; Maximum document ID length for replication.
615
+ ;max_document_id_length = infinity
616
+
617
+ ; How much time to wait before retrying after a missing doc exception. This
618
+ ; exception happens if the document was seen in the changes feed, but internal
619
+ ; replication hasn't caught up yet, and fetching document's revisions
620
+ ; fails. This is a common scenario when source is updated while continuous
621
+ ; replication is running. The retry period would depend on how quickly internal
622
+ ; replication is expected to catch up. In general this is an optimisation to
623
+ ; avoid crashing the whole replication job, which would consume more resources
624
+ ; and add log noise.
625
+ ;missing_doc_retry_msec = 2000
626
+
627
+ ; Wait this many seconds after startup before attaching changes listeners
628
+ ;cluster_start_period = 5
629
+
630
+ ; Re-check cluster state at least every cluster_quiet_period seconds
631
+ ;cluster_quiet_period = 60
632
+
633
+ ; List of replicator client authentication plugins to try. Plugins will be
634
+ ; tried in order. The first to initialize successfully will be used for that
635
+ ; particular endpoint (source or target). Normally couch_replicator_auth_noop
636
+ ; would be used at the end of the list as a "catch-all". It doesn't do anything
637
+ ; and effectively implements the previous behavior of using basic auth.
638
+ ; There are currently two plugins available:
639
+ ; couch_replicator_auth_session - use _session cookie authentication
640
+ ; couch_replicator_auth_noop - use basic authentication (previous default)
641
+ ; Currently, the new _session cookie authentication is tried first, before
642
+ ; falling back to the old basic authentication default:
643
+ ;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
644
+
645
+ ; To restore the old behaviour, use the following value:
646
+ ;auth_plugins = couch_replicator_auth_noop
647
+
648
+ ; Force couch_replicator_auth_session plugin to refresh the session
649
+ ; periodically if max-age is not present in the cookie. This is mostly to
650
+ ; handle the case where anonymous writes are allowed to the database and a VDU
651
+ ; function is used to forbid writes based on the authenticated user name. In
652
+ ; that case this value should be adjusted based on the expected minimum session
653
+ ; expiry timeout on replication endpoints. If session expiry results in a 401
654
+ ; or 403 response this setting is not needed.
655
+ ;session_refresh_interval_sec = 550
656
+
657
+ ; Usage coefficient decays historic fair share usage every scheduling
658
+ ; cycle. The value must be between 0.0 and 1.0. Lower values will
659
+ ; ensure historic usage decays quicker and higher values means it will
660
+ ; be remembered longer.
661
+ ;usage_coeff = 0.5
662
+
663
+ ; Priority coefficient decays all the job priorities such that they slowly
664
+ ; drift towards the front of the run queue. This coefficient defines a maximum
665
+ ; time window over which this algorithm would operate. For example, if this
666
+ ; value is too small (0.1), after a few cycles quite a few jobs would end up at
667
+ ; priority 0, and would render this algorithm useless. The default value of
668
+ ; 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
669
+ ; get to run for 7 hours, it would still have priority > 0. 7 hours was picked
670
+ ; as it was close enough to 8 hours which is the default maximum error backoff
671
+ ; interval.
672
+ ;priority_coeff = 0.98
673
+
674
+ [replicator.shares]
675
+ ; Fair share configuration section. More shares result in a higher
676
+ ; chance that jobs from that db get to run. The default value is 100,
677
+ ; minimum is 1 and maximum is 1000. The configuration may be set even
678
+ ; if the database does not exist.
679
+ ;_replicator = 100
680
+
681
+ [log]
682
+ ; Possible log levels:
683
+ ; debug
684
+ ; info
685
+ ; notice
686
+ ; warning, warn
687
+ ; error, err
688
+ ; critical, crit
689
+ ; alert
690
+ ; emergency, emerg
691
+ ; none
692
+ ;level = info
693
+
694
+ ; Set the maximum log message length in bytes that will be
695
+ ; passed through the writer
696
+ ;max_message_size = 16000
697
+
698
+ ; Do not log last message received by terminated process
699
+ ;strip_last_msg = true
700
+
701
+ ; List of fields to remove before logging the crash report
702
+ ;filter_fields = [pid, registered_name, error_info, messages]
703
+
704
+ ; There are four different log writers that can be configured
705
+ ; to write log messages. The default writes to stderr of the
706
+ ; Erlang VM which is useful for debugging/development as well
707
+ ; as a lot of container deployments.
708
+ ;
709
+ ; There's also a file writer that works with logrotate, a
710
+ ; rsyslog writer for deployments that need to have logs sent
711
+ ; over the network, and a journald writer that's more suitable
712
+ ; when using systemd journald.
713
+ ;writer = stderr
714
+
715
+ ; Journald Writer notes:
716
+ ;
717
+ ; The journald writer doesn't have any options. It still writes
718
+ ; the logs to stderr, but without the timestamp prepended, since
719
+ ; the journal will add it automatically, and with the log level
720
+ ; formatted as per
721
+ ; https://www.freedesktop.org/software/systemd/man/sd-daemon.html
722
+ ;
723
+ ; File Writer Options:
724
+ ;
725
+ ; The file writer will check every 30s to see if it needs
726
+ ; to reopen its file. This is useful for people that configure
727
+ ; logrotate to move log files periodically.
728
+ ;file = ./couch.log ; Path name to write logs to
729
+
730
+ ; Write operations will happen either every write_buffer bytes
731
+ ; or write_delay milliseconds. These are passed directly to the
732
+ ; Erlang file module with the write_delay option documented here:
733
+ ;
734
+ ; http://erlang.org/doc/man/file.html
735
+ ;
736
+ ;write_buffer = 0
737
+ ;write_delay = 0
738
+ ;
739
+ ; Syslog Writer Options:
740
+ ;
741
+ ; The syslog writer options all correspond to their obvious
742
+ ; counterparts in rsyslog nomenclature.
743
+ ;syslog_host =
744
+ ;syslog_port = 514
745
+ ;syslog_appid = couchdb
746
+ ;syslog_facility = local2
747
+
748
+ [stats]
749
+ ; Stats collection interval in seconds. Default 10 seconds.
750
+ ;interval = 10
751
+
752
+ [smoosh]
753
+ ; More documentation on these is in the Automatic Compaction
754
+ ; section of the documentation.
755
+ ;db_channels = upgrade_dbs,ratio_dbs,slack_dbs
756
+ ;view_channels = upgrade_views,ratio_views,slack_views
757
+
758
+ ;[smoosh.ratio_dbs]
759
+ ;priority = ratio
760
+ ;min_priority = 2.0
761
+
762
+ ;[smoosh.ratio_views]
763
+ ;priority = ratio
764
+ ;min_priority = 2.0
765
+
766
+ ;[smoosh.slack_dbs]
767
+ ;priority = slack
768
+ ;min_priority = 536870912
769
+
770
+ ;[smoosh.slack_views]
771
+ ;priority = slack
772
+ ;min_priority = 536870912
773
+
774
+ ; Directory to store the state of smoosh
775
+ state_dir = ./data
776
+
777
+ ; Sets the log level for informational compaction related entries.
778
+ ;compaction_log_level = debug
779
+
780
+ ; Enable persistence for smoosh state
781
+ ;persist = false
782
+
783
+ [ioq]
784
+ ; The maximum number of concurrent in-flight IO requests that are allowed
785
+ ;concurrency = 10
786
+
787
+ ; The fraction of the time that a background IO request will be selected
788
+ ; over an interactive IO request when both queues are non-empty
789
+ ;ratio = 0.01
790
+
791
+ [ioq.bypass]
792
+ ; System administrators can choose to submit specific classes of IO directly
793
+ ; to the underlying file descriptor or OS process, bypassing the queues
794
+ ; altogether. Installing a bypass can yield higher throughput and lower
795
+ ; latency, but relinquishes some control over prioritization. The following
796
+ ; classes are recognized with the following defaults:
797
+
798
+ ; Messages on their way to an external process (e.g., couchjs) are bypassed
799
+ ;os_process = true
800
+
801
+ ; Disk IO fulfilling interactive read requests is bypassed
802
+ ;read = true
803
+
804
+ ; Disk IO required to update a database is bypassed
805
+ ;write = true
806
+
807
+ ; Disk IO required to update views and other secondary indexes is bypassed
808
+ ;view_update = true
809
+
810
+ ; Disk IO issued by the background replication processes that fix any
811
+ ; inconsistencies between shard copies is queued
812
+ ;shard_sync = false
813
+
814
+ ; Disk IO issued by compaction jobs is queued
815
+ ;compaction = false
816
+
817
+ [dreyfus]
818
+ ; The name and location of the Clouseau Java service required to
819
+ ; enable Search functionality.
820
+ ;name = clouseau@127.0.0.1
821
+
822
+ ; CouchDB will try to re-connect to Clouseau using a bounded
823
+ ; exponential backoff with the following number of iterations.
824
+ ;retry_limit = 5
825
+
826
+ ; The default number of results returned from a global search query.
827
+ ;limit = 25
828
+
829
+ ; The default number of results returned from a search on a partition
830
+ ; of a database.
831
+ ;limit_partitions = 2000
832
+
833
+ ; The maximum number of results that can be returned from a global
834
+ ; search query (or any search query on a database without user-defined
835
+ ; partitions). Attempts to set ?limit=N higher than this value will
836
+ ; be rejected.
837
+ ;max_limit = 200
838
+
839
+ ; The maximum number of results that can be returned when searching
840
+ ; a partition of a database. Attempts to set ?limit=N higher than this
841
+ ; value will be rejected. If this config setting is not defined,
842
+ ; CouchDB will use the value of `max_limit` instead. If neither is
843
+ ; defined, the default is 2000 as stated here.
844
+ ;max_limit_partitions = 2000
845
+
846
+ [reshard]
847
+ ;max_jobs = 48
848
+ ;max_history = 20
849
+ ;max_retries = 5
850
+ ;retry_interval_sec = 10
851
+ ;delete_source = true
852
+ ;update_shard_map_timeout_sec = 60
853
+ ;source_close_timeout_sec = 600
854
+ ;require_node_param = false
855
+ ;require_range_param = false
856
+
857
+ ; How many times to retry building an individual index
858
+ ;index_max_retries = 5
859
+
860
+ ; How many seconds to wait between retries for an individual index
861
+ ;index_retry_interval_sec = 10
862
+
863
+ [prometheus]
864
+ additional_port = false
865
+ bind_address = 127.0.0.1
866
+ port = 17986
867
+
868
+ [view_upgrade]
869
+ ; When enabled, views with more than one collator versions will be submitted
870
+ ; for auto-compaction to smoosh's "upgrade_views" channel.
871
+ ;compact_on_collator_upgrade = true
872
+
873
+ ; Eagerly commit views which have been upgraded from older header formats. A reason
874
+ ; to disable this setting could be if the views need an upgrade but are located
875
+ ; on a read-only file system.
876
+ ;commit_on_header_upgrade = true
877
+
878
+ [custodian]
879
+ ; When set to `true`, force using `[cluster] n` values as the expected n value
880
+ ; of shard copies. In cases where the application prevents creating
881
+ ; non-default n databases, this could help detect cases where the shard map was
882
+ ; altered by hand, or via external tools, such that it doesn't have the
883
+ ; necessary number of copies for some ranges. By default, when the setting is
884
+ ; `false`, the expected n value is based on the number of available copies in
885
+ ; the shard map.
886
+ ;use_cluster_n_as_expected_n = false