@terafina/tffa-sfdx-plugin 0.1.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/README.md +5 -0
  2. package/assets/favicon.png +0 -0
  3. package/assets/highlight.css +68 -0
  4. package/assets/highlight.js +2801 -0
  5. package/assets/icon.png +0 -0
  6. package/assets/index.css +447 -0
  7. package/assets/index.js +411 -0
  8. package/assets/logo.png +0 -0
  9. package/assets/lunr.js +3471 -0
  10. package/assets/mark.js +13 -0
  11. package/assets/menu.js +34 -0
  12. package/assets/search.js +98 -0
  13. package/lib/apexdoc/common/apex-docs-error.d.ts +23 -0
  14. package/lib/apexdoc/common/apex-docs-error.js +28 -0
  15. package/lib/apexdoc/common/apex-docs-error.js.map +1 -0
  16. package/lib/apexdoc/common/array-utils.d.ts +9 -0
  17. package/lib/apexdoc/common/array-utils.js +36 -0
  18. package/lib/apexdoc/common/array-utils.js.map +1 -0
  19. package/lib/apexdoc/common/line-reader.d.ts +40 -0
  20. package/lib/apexdoc/common/line-reader.js +92 -0
  21. package/lib/apexdoc/common/line-reader.js.map +1 -0
  22. package/lib/apexdoc/common/models/apex-model.d.ts +51 -0
  23. package/lib/apexdoc/common/models/apex-model.js +264 -0
  24. package/lib/apexdoc/common/models/apex-model.js.map +1 -0
  25. package/lib/apexdoc/common/models/class-group.d.ts +10 -0
  26. package/lib/apexdoc/common/models/class-group.js +32 -0
  27. package/lib/apexdoc/common/models/class-group.js.map +1 -0
  28. package/lib/apexdoc/common/models/class-model.d.ts +38 -0
  29. package/lib/apexdoc/common/models/class-model.js +216 -0
  30. package/lib/apexdoc/common/models/class-model.js.map +1 -0
  31. package/lib/apexdoc/common/models/engine-config.d.ts +21 -0
  32. package/lib/apexdoc/common/models/engine-config.js +29 -0
  33. package/lib/apexdoc/common/models/engine-config.js.map +1 -0
  34. package/lib/apexdoc/common/models/enum-model.d.ts +15 -0
  35. package/lib/apexdoc/common/models/enum-model.js +47 -0
  36. package/lib/apexdoc/common/models/enum-model.js.map +1 -0
  37. package/lib/apexdoc/common/models/index.d.ts +10 -0
  38. package/lib/apexdoc/common/models/index.js +14 -0
  39. package/lib/apexdoc/common/models/index.js.map +1 -0
  40. package/lib/apexdoc/common/models/method-model.d.ts +26 -0
  41. package/lib/apexdoc/common/models/method-model.js +309 -0
  42. package/lib/apexdoc/common/models/method-model.js.map +1 -0
  43. package/lib/apexdoc/common/models/property-model.d.ts +12 -0
  44. package/lib/apexdoc/common/models/property-model.js +47 -0
  45. package/lib/apexdoc/common/models/property-model.js.map +1 -0
  46. package/lib/apexdoc/common/models/top-level-model.d.ts +19 -0
  47. package/lib/apexdoc/common/models/top-level-model.js +32 -0
  48. package/lib/apexdoc/common/models/top-level-model.js.map +1 -0
  49. package/lib/apexdoc/common/settings.d.ts +55 -0
  50. package/lib/apexdoc/common/settings.js +124 -0
  51. package/lib/apexdoc/common/settings.js.map +1 -0
  52. package/lib/apexdoc/common/tags.d.ts +22 -0
  53. package/lib/apexdoc/common/tags.js +168 -0
  54. package/lib/apexdoc/common/tags.js.map +1 -0
  55. package/lib/apexdoc/common/utils.d.ts +38 -0
  56. package/lib/apexdoc/common/utils.js +198 -0
  57. package/lib/apexdoc/common/utils.js.map +1 -0
  58. package/lib/apexdoc/common/validator-engine.d.ts +24 -0
  59. package/lib/apexdoc/common/validator-engine.js +162 -0
  60. package/lib/apexdoc/common/validator-engine.js.map +1 -0
  61. package/lib/apexdoc/common/validator.d.ts +16 -0
  62. package/lib/apexdoc/common/validator.js +35 -0
  63. package/lib/apexdoc/common/validator.js.map +1 -0
  64. package/lib/apexdoc/engine/file-manager.d.ts +52 -0
  65. package/lib/apexdoc/engine/file-manager.js +390 -0
  66. package/lib/apexdoc/engine/file-manager.js.map +1 -0
  67. package/lib/apexdoc/engine/generators/generator-utils.d.ts +12 -0
  68. package/lib/apexdoc/engine/generators/generator-utils.js +100 -0
  69. package/lib/apexdoc/engine/generators/generator-utils.js.map +1 -0
  70. package/lib/apexdoc/engine/generators/menu-generator.d.ts +9 -0
  71. package/lib/apexdoc/engine/generators/menu-generator.js +81 -0
  72. package/lib/apexdoc/engine/generators/menu-generator.js.map +1 -0
  73. package/lib/apexdoc/engine/generators/models/child-enum-markup-generator.d.ts +10 -0
  74. package/lib/apexdoc/engine/generators/models/child-enum-markup-generator.js +65 -0
  75. package/lib/apexdoc/engine/generators/models/child-enum-markup-generator.js.map +1 -0
  76. package/lib/apexdoc/engine/generators/models/class-markup-generator.d.ts +8 -0
  77. package/lib/apexdoc/engine/generators/models/class-markup-generator.js +49 -0
  78. package/lib/apexdoc/engine/generators/models/class-markup-generator.js.map +1 -0
  79. package/lib/apexdoc/engine/generators/models/enum-markup-generator.d.ts +9 -0
  80. package/lib/apexdoc/engine/generators/models/enum-markup-generator.js +40 -0
  81. package/lib/apexdoc/engine/generators/models/enum-markup-generator.js.map +1 -0
  82. package/lib/apexdoc/engine/generators/models/markup-generator.d.ts +27 -0
  83. package/lib/apexdoc/engine/generators/models/markup-generator.js +148 -0
  84. package/lib/apexdoc/engine/generators/models/markup-generator.js.map +1 -0
  85. package/lib/apexdoc/engine/generators/models/method-markup-generator.d.ts +22 -0
  86. package/lib/apexdoc/engine/generators/models/method-markup-generator.js +224 -0
  87. package/lib/apexdoc/engine/generators/models/method-markup-generator.js.map +1 -0
  88. package/lib/apexdoc/engine/generators/models/property-markup-generator.d.ts +11 -0
  89. package/lib/apexdoc/engine/generators/models/property-markup-generator.js +66 -0
  90. package/lib/apexdoc/engine/generators/models/property-markup-generator.js.map +1 -0
  91. package/lib/apexdoc/engine/generators/models/source-markup-generator.d.ts +10 -0
  92. package/lib/apexdoc/engine/generators/models/source-markup-generator.js +39 -0
  93. package/lib/apexdoc/engine/generators/models/source-markup-generator.js.map +1 -0
  94. package/lib/apexdoc/engine/generators/models/top-level-markup-generator.d.ts +10 -0
  95. package/lib/apexdoc/engine/generators/models/top-level-markup-generator.js +49 -0
  96. package/lib/apexdoc/engine/generators/models/top-level-markup-generator.js.map +1 -0
  97. package/lib/apexdoc/engine/generators/see-link-generator.d.ts +20 -0
  98. package/lib/apexdoc/engine/generators/see-link-generator.js +183 -0
  99. package/lib/apexdoc/engine/generators/see-link-generator.js.map +1 -0
  100. package/lib/commands/tffa/apexdoc.d.ts +29 -0
  101. package/lib/commands/tffa/apexdoc.js +272 -0
  102. package/lib/commands/tffa/apexdoc.js.map +1 -0
  103. package/lib/commands/tffa/ping.d.ts +3 -0
  104. package/lib/commands/tffa/ping.js +3 -0
  105. package/lib/commands/tffa/ping.js.map +1 -1
  106. package/lib/commands/tffa/scan.d.ts +52 -0
  107. package/lib/commands/tffa/scan.js +362 -0
  108. package/lib/commands/tffa/scan.js.map +1 -0
  109. package/lib/index.d.ts +3 -0
  110. package/lib/index.js +3 -0
  111. package/lib/index.js.map +1 -1
  112. package/lib/shared/model.d.ts +30 -0
  113. package/lib/shared/model.js +17 -0
  114. package/lib/shared/model.js.map +1 -0
  115. package/lib/shared/parser.d.ts +3 -0
  116. package/lib/shared/parser.js +101 -0
  117. package/lib/shared/parser.js.map +1 -0
  118. package/lib/shared/rules.d.ts +3 -0
  119. package/lib/shared/rules.js +5 -0
  120. package/lib/shared/rules.js.map +1 -0
  121. package/oclif.manifest.json +1 -1
  122. package/package.json +49 -26
  123. package/CHANGELOG.md +0 -12
package/assets/lunr.js ADDED
@@ -0,0 +1,3471 @@
1
+ /**
2
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.6
3
+ * Copyright (C) 2019 Oliver Nightingale
4
+ * @license MIT
5
+ */
6
+
7
+ ;(function(){
8
+
9
/**
 * Convenience entry point for configuring and constructing a new lunr Index.
 *
 * A lunr.Builder is created with its indexing pipeline pre-loaded with the
 * trimmer, stop-word filter and stemmer, and its search pipeline pre-loaded
 * with the stemmer alone. The supplied config function is then invoked with
 * the builder as both `this` and its first argument, allowing fields, the
 * document ref and other builder parameters to be customised.
 *
 * All documents _must_ be added within the passed config function.
 *
 * @example
 * var idx = lunr(function () {
 *   this.field('title')
 *   this.field('body')
 *   this.ref('id')
 *
 *   documents.forEach(function (doc) {
 *     this.add(doc)
 *   }, this)
 * })
 *
 * @see {@link lunr.Builder}
 * @see {@link lunr.Pipeline}
 * @see {@link lunr.trimmer}
 * @see {@link lunr.stopWordFilter}
 * @see {@link lunr.stemmer}
 * @namespace {function} lunr
 */
var lunr = function (config) {
  var builder = new lunr.Builder

  // Default processing chain applied to document text at index time.
  builder.pipeline.add(lunr.trimmer, lunr.stopWordFilter, lunr.stemmer)

  // Queries are only stemmed; trimming and stop-word removal are skipped.
  builder.searchPipeline.add(lunr.stemmer)

  config.call(builder, builder)
  return builder.build()
}

lunr.version = "2.3.6"
58
/*!
 * lunr.utils
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A namespace containing utils for the rest of the lunr library
 * @namespace lunr.utils
 */
lunr.utils = {}

/**
 * Print a warning message to the console.
 *
 * Silently does nothing in environments that lack a console.
 *
 * @param {String} message The message to be printed.
 * @memberOf lunr.utils
 * @function
 */
lunr.utils.warn = (function (global) {
  /* eslint-disable no-console */
  return function (message) {
    // Guard clause: some hosts (old browsers, workers) have no console.warn.
    if (!global.console || !console.warn) {
      return
    }

    console.warn(message)
  }
  /* eslint-enable no-console */
})(this)
85
+
86
/**
 * Convert an object to a string.
 *
 * `null` and `undefined` become the empty string; every other value is
 * converted via its own `toString` method.
 *
 * @param {Any} obj The object to convert to a string.
 * @return {String} string representation of the passed object.
 * @memberOf lunr.utils
 */
lunr.utils.asString = function (obj) {
  // Loose equality with null deliberately matches undefined as well.
  if (obj == null) {
    return ""
  }

  return obj.toString()
}
104
+
105
/**
 * Clones an object.
 *
 * Will create a copy of an existing object such that any mutations
 * on the copy cannot affect the original.
 *
 * Only shallow objects are supported: values may be primitives
 * (string, number, boolean) or arrays of primitives. Passing a nested
 * object will cause a TypeError.
 *
 * @param {Object} obj The object to clone.
 * @return {Object} a clone of the passed object.
 * @throws {TypeError} when a nested object is passed.
 * @memberOf Utils
 */
lunr.utils.clone = function (obj) {
  if (obj === null || obj === undefined) {
    return obj
  }

  // Prototype-less object so cloned keys can never collide with
  // Object.prototype properties.
  var copy = Object.create(null)
  var keys = Object.keys(obj)

  for (var i = 0; i < keys.length; i++) {
    var key = keys[i]
    var value = obj[key]

    if (Array.isArray(value)) {
      // Arrays are copied shallowly; their elements are assumed primitive.
      copy[key] = value.slice()
    } else if (typeof value === 'string' ||
               typeof value === 'number' ||
               typeof value === 'boolean') {
      copy[key] = value
    } else {
      throw new TypeError("clone is not deep and does not support nested objects")
    }
  }

  return copy
}
150
/**
 * A reference to a document field, pairing a document ref with a field name.
 * The optional stringValue caches the serialised "field/doc" form.
 *
 * @constructor
 * @param {String} docRef - The document this field ref points at.
 * @param {String} fieldName - The name of the field.
 * @param {String} [stringValue] - Pre-computed string form, if known.
 */
lunr.FieldRef = function (docRef, fieldName, stringValue) {
  this.docRef = docRef
  this.fieldName = fieldName
  this._stringValue = stringValue
}

// Character separating the field name from the doc ref in string form.
lunr.FieldRef.joiner = "/"

/**
 * Parses a "fieldName/docRef" string into a lunr.FieldRef.
 *
 * @param {String} s - The serialised field ref.
 * @returns {lunr.FieldRef}
 */
lunr.FieldRef.fromString = function (s) {
  var joinerIdx = s.indexOf(lunr.FieldRef.joiner)

  if (joinerIdx === -1) {
    throw "malformed field ref string"
  }

  // Everything before the first joiner is the field, the rest is the doc ref;
  // the original string is passed along so toString can reuse it.
  return new lunr.FieldRef(s.slice(joinerIdx + 1), s.slice(0, joinerIdx), s)
}

/**
 * Serialises this field ref as "fieldName/docRef", caching the result.
 *
 * @returns {String}
 */
lunr.FieldRef.prototype.toString = function () {
  // Lazily build and memoise the string form on first use.
  if (this._stringValue == undefined) {
    this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef
  }

  return this._stringValue
}
178
/*!
 * lunr.Set
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A lunr set.
 *
 * Backed by a prototype-less object whose keys are the set members.
 *
 * @constructor
 * @param {Array} [elements] - Initial members of the set.
 */
lunr.Set = function (elements) {
  this.elements = Object.create(null)

  if (!elements) {
    this.length = 0
    return
  }

  this.length = elements.length

  for (var i = 0; i < this.length; i++) {
    this.elements[elements[i]] = true
  }
}

/**
 * A complete set that contains all elements.
 *
 * @static
 * @readonly
 * @type {lunr.Set}
 */
lunr.Set.complete = {
  // Intersecting with "everything" yields the other set unchanged.
  intersect: function (other) {
    return other
  },

  union: function (other) {
    return other
  },

  contains: function () {
    return true
  }
}

/**
 * An empty set that contains no elements.
 *
 * @static
 * @readonly
 * @type {lunr.Set}
 */
lunr.Set.empty = {
  intersect: function () {
    return this
  },

  // Union with "nothing" yields the other set unchanged.
  union: function (other) {
    return other
  },

  contains: function () {
    return false
  }
}

/**
 * Returns true if this set contains the specified object.
 *
 * @param {object} object - Object whose presence in this set is to be tested.
 * @returns {boolean} - True if this set contains the specified object.
 */
lunr.Set.prototype.contains = function (object) {
  return this.elements[object] === true
}

/**
 * Returns a new set containing only the elements that are present in both
 * this set and the specified set.
 *
 * @param {lunr.Set} other - set to intersect with this set.
 * @returns {lunr.Set} a new set that is the intersection of this and the specified set.
 */

lunr.Set.prototype.intersect = function (other) {
  if (other === lunr.Set.complete) {
    return this
  }

  if (other === lunr.Set.empty) {
    return other
  }

  // Iterate the smaller set and probe the larger for membership.
  var smaller = this
  var larger = other

  if (this.length >= other.length) {
    smaller = other
    larger = this
  }

  var candidates = Object.keys(smaller.elements)
  var shared = []

  for (var i = 0; i < candidates.length; i++) {
    var candidate = candidates[i]
    if (candidate in larger.elements) {
      shared.push(candidate)
    }
  }

  return new lunr.Set (shared)
}

/**
 * Returns a new set combining the elements of this and the specified set.
 *
 * @param {lunr.Set} other - set to union with this set.
 * @return {lunr.Set} a new set that is the union of this and the specified set.
 */

lunr.Set.prototype.union = function (other) {
  if (other === lunr.Set.complete) {
    return lunr.Set.complete
  }

  if (other === lunr.Set.empty) {
    return this
  }

  // Duplicates are collapsed by the Set constructor's keyed storage.
  return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))
}
311
/**
 * A function to calculate the inverse document frequency for
 * a posting. This is shared between the builder and the index.
 *
 * @private
 * @param {object} posting - The posting for a given term
 * @param {number} documentCount - The total number of documents.
 */
lunr.idf = function (posting, documentCount) {
  var documentsWithTerm = 0

  for (var fieldName in posting) {
    // '_index' stores the term's index position, not a field.
    if (fieldName == '_index') continue
    documentsWithTerm += Object.keys(posting[fieldName]).length
  }

  var ratio = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)

  // abs + log1p form keeps the result finite and non-negative.
  return Math.log(1 + Math.abs(ratio))
}
331
+
332
/**
 * A token wraps a string representation of a token
 * as it is passed through the text processing pipeline.
 *
 * @constructor
 * @param {string} [str=''] - The string token being wrapped.
 * @param {object} [metadata={}] - Metadata associated with this token.
 */
lunr.Token = function (str, metadata) {
  this.str = str || ""
  this.metadata = metadata || {}
}

/**
 * Returns the token string that is being wrapped by this object.
 *
 * @returns {string}
 */
lunr.Token.prototype.toString = function () {
  return this.str
}

/**
 * A token update function is used when updating or optionally
 * when cloning a token.
 *
 * @callback lunr.Token~updateFunction
 * @param {string} str - The string representation of the token.
 * @param {Object} metadata - All metadata associated with this token.
 */

/**
 * Applies the given function to the wrapped string token.
 *
 * @example
 * token.update(function (str, metadata) {
 *   return str.toUpperCase()
 * })
 *
 * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
 * @returns {lunr.Token} this token, for chaining.
 */
lunr.Token.prototype.update = function (fn) {
  // Replace the wrapped string in place; metadata is passed for context.
  this.str = fn(this.str, this.metadata)
  return this
}

/**
 * Creates a clone of this token. Optionally a function can be
 * applied to the cloned token.
 *
 * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
 * @returns {lunr.Token}
 */
lunr.Token.prototype.clone = function (fn) {
  // Default to the identity transform when no update function is given.
  if (!fn) {
    fn = function (s) { return s }
  }

  // NOTE: metadata is shared by reference between the clone and original.
  return new lunr.Token (fn(this.str, this.metadata), this.metadata)
}
390
/*!
 * lunr.tokenizer
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A function for splitting a string into tokens ready to be inserted into
 * the search index. Uses `lunr.tokenizer.separator` to split strings, change
 * the value of this property to change how strings are split into tokens.
 *
 * This tokenizer will convert its parameter to a string by calling `toString` and
 * then will split this string on the character in `lunr.tokenizer.separator`.
 * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
 *
 * Optional metadata can be passed to the tokenizer, this metadata will be cloned and
 * added as metadata to every token that is created from the object to be tokenized.
 *
 * @static
 * @param {?(string|object|object[])} obj - The object to convert into tokens
 * @param {?object} metadata - Optional metadata to associate with every token
 * @returns {lunr.Token[]}
 * @see {@link lunr.Pipeline}
 */
lunr.tokenizer = function (obj, metadata) {
  // Loose null check covers both null and undefined.
  if (obj == null) {
    return []
  }

  // Arrays map element-wise to tokens; no separator splitting is applied.
  if (Array.isArray(obj)) {
    return obj.map(function (t) {
      return new lunr.Token(
        lunr.utils.asString(t).toLowerCase(),
        lunr.utils.clone(metadata)
      )
    })
  }

  var str = obj.toString().trim().toLowerCase()
  var len = str.length
  var tokens = []

  // Single pass over the string: sliceStart..sliceEnd brackets the token
  // currently being accumulated. Iterating to <= len lets the final token
  // be flushed at the virtual position one past the end (charAt returns '').
  for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
    var char = str.charAt(sliceEnd)
    var sliceLength = sliceEnd - sliceStart

    if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
      if (sliceLength > 0) {
        var tokenMetadata = lunr.utils.clone(metadata) || {}

        // Record where the token came from and its ordinal position.
        tokenMetadata["position"] = [sliceStart, sliceLength]
        tokenMetadata["index"] = tokens.length

        tokens.push(
          new lunr.Token (str.slice(sliceStart, sliceEnd), tokenMetadata)
        )
      }

      sliceStart = sliceEnd + 1
    }
  }

  return tokens
}

/**
 * The separator used to split a string into tokens. Override this property to change the behaviour of
 * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens.
 *
 * @static
 * @see lunr.tokenizer
 */
lunr.tokenizer.separator = /[\s\-]+/
466
/*!
 * lunr.Pipeline
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * lunr.Pipelines maintain an ordered list of functions to be applied to all
 * tokens in documents entering the search index and queries being ran against
 * the index.
 *
 * An instance of lunr.Index created with the lunr shortcut will contain a
 * pipeline with a stop word filter and an English language stemmer. Extra
 * functions can be added before or after either of these functions or these
 * default functions can be removed.
 *
 * When run the pipeline will call each function in turn, passing a token, the
 * index of that token in the original list of all tokens and finally a list of
 * all the original tokens.
 *
 * The output of functions in the pipeline will be passed to the next function
 * in the pipeline. To exclude a token from entering the index the function
 * should return undefined, the rest of the pipeline will not be called with
 * this token.
 *
 * For serialisation of pipelines to work, all functions used in an instance of
 * a pipeline should be registered with lunr.Pipeline. Registered functions can
 * then be loaded. If trying to load a serialised pipeline that uses functions
 * that are not registered an error will be thrown.
 *
 * If not planning on serialising the pipeline then registering pipeline functions
 * is not necessary.
 *
 * @constructor
 */
lunr.Pipeline = function () {
  // Ordered list of pipeline functions; applied front to back.
  this._stack = []
}

// Global label -> function registry used for (de)serialising pipelines.
lunr.Pipeline.registeredFunctions = Object.create(null)

/**
 * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
 * string as well as all known metadata. A pipeline function can mutate the token string
 * or mutate (or add) metadata for a given token.
 *
 * A pipeline function can indicate that the passed token should be discarded by returning
 * null. This token will not be passed to any downstream pipeline functions and will not be
 * added to the index.
 *
 * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
 * to any downstream pipeline functions and all will returned tokens will be added to the index.
 *
 * Any number of pipeline functions may be chained together using a lunr.Pipeline.
 *
 * @interface lunr.PipelineFunction
 * @param {lunr.Token} token - A token from the document being processed.
 * @param {number} i - The index of this token in the complete list of tokens for this document/field.
 * @param {lunr.Token[]} tokens - All tokens for this document/field.
 * @returns {(?lunr.Token|lunr.Token[])}
 */

/**
 * Register a function with the pipeline.
 *
 * Functions that are used in the pipeline should be registered if the pipeline
 * needs to be serialised, or a serialised pipeline needs to be loaded.
 *
 * Registering a function does not add it to a pipeline, functions must still be
 * added to instances of the pipeline for them to be used when running a pipeline.
 *
 * @param {lunr.PipelineFunction} fn - The function to register.
 * @param {String} label - The label to register this function with
 */
lunr.Pipeline.registerFunction = function (fn, label) {
  // Re-registering a label is allowed but flagged, since it silently
  // replaces the previous function.
  if (label in this.registeredFunctions) {
    lunr.utils.warn('Overwriting existing registered function: ' + label)
  }

  // The label is stamped onto the function so toJSON can recover it later.
  fn.label = label
  lunr.Pipeline.registeredFunctions[fn.label] = fn
}

/**
 * Warns if the function is not registered as a Pipeline function.
 *
 * @param {lunr.PipelineFunction} fn - The function to check for.
 * @private
 */
lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
  var registered = fn.label && (fn.label in this.registeredFunctions)

  if (registered) {
    return
  }

  lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
}
561
+
562
+ /**
563
+ * Loads a previously serialised pipeline.
564
+ *
565
+ * All functions to be loaded must already be registered with lunr.Pipeline.
566
+ * If any function from the serialised data has not been registered then an
567
+ * error will be thrown.
568
+ *
569
+ * @param {Object} serialised - The serialised pipeline to load.
570
+ * @returns {lunr.Pipeline}
571
+ */
572
+ lunr.Pipeline.load = function (serialised) {
573
+ var pipeline = new lunr.Pipeline
574
+
575
+ serialised.forEach(function (fnName) {
576
+ var fn = lunr.Pipeline.registeredFunctions[fnName]
577
+
578
+ if (fn) {
579
+ pipeline.add(fn)
580
+ } else {
581
+ throw new Error('Cannot load unregistered function: ' + fnName)
582
+ }
583
+ })
584
+
585
+ return pipeline
586
+ }
587
+
588
+ /**
589
+ * Adds new functions to the end of the pipeline.
590
+ *
591
+ * Logs a warning if the function has not been registered.
592
+ *
593
+ * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
594
+ */
595
+ lunr.Pipeline.prototype.add = function () {
596
+ var fns = Array.prototype.slice.call(arguments)
597
+
598
+ fns.forEach(function (fn) {
599
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
600
+ this._stack.push(fn)
601
+ }, this)
602
+ }
603
+
604
+ /**
605
+ * Adds a single function after a function that already exists in the
606
+ * pipeline.
607
+ *
608
+ * Logs a warning if the function has not been registered.
609
+ *
610
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
611
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
612
+ */
613
+ lunr.Pipeline.prototype.after = function (existingFn, newFn) {
614
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
615
+
616
+ var pos = this._stack.indexOf(existingFn)
617
+ if (pos == -1) {
618
+ throw new Error('Cannot find existingFn')
619
+ }
620
+
621
+ pos = pos + 1
622
+ this._stack.splice(pos, 0, newFn)
623
+ }
624
+
625
+ /**
626
+ * Adds a single function before a function that already exists in the
627
+ * pipeline.
628
+ *
629
+ * Logs a warning if the function has not been registered.
630
+ *
631
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
632
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
633
+ */
634
+ lunr.Pipeline.prototype.before = function (existingFn, newFn) {
635
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
636
+
637
+ var pos = this._stack.indexOf(existingFn)
638
+ if (pos == -1) {
639
+ throw new Error('Cannot find existingFn')
640
+ }
641
+
642
+ this._stack.splice(pos, 0, newFn)
643
+ }
644
+
645
+ /**
646
+ * Removes a function from the pipeline.
647
+ *
648
+ * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
649
+ */
650
+ lunr.Pipeline.prototype.remove = function (fn) {
651
+ var pos = this._stack.indexOf(fn)
652
+ if (pos == -1) {
653
+ return
654
+ }
655
+
656
+ this._stack.splice(pos, 1)
657
+ }
658
+
659
+ /**
660
+ * Runs the current list of functions that make up the pipeline against the
661
+ * passed tokens.
662
+ *
663
+ * @param {Array} tokens The tokens to run through the pipeline.
664
+ * @returns {Array}
665
+ */
666
+ lunr.Pipeline.prototype.run = function (tokens) {
667
+ var stackLength = this._stack.length
668
+
669
+ for (var i = 0; i < stackLength; i++) {
670
+ var fn = this._stack[i]
671
+ var memo = []
672
+
673
+ for (var j = 0; j < tokens.length; j++) {
674
+ var result = fn(tokens[j], j, tokens)
675
+
676
+ if (result === void 0 || result === '') continue
677
+
678
+ if (Array.isArray(result)) {
679
+ for (var k = 0; k < result.length; k++) {
680
+ memo.push(result[k])
681
+ }
682
+ } else {
683
+ memo.push(result)
684
+ }
685
+ }
686
+
687
+ tokens = memo
688
+ }
689
+
690
+ return tokens
691
+ }
692
+
693
+ /**
694
+ * Convenience method for passing a string through a pipeline and getting
695
+ * strings out. This method takes care of wrapping the passed string in a
696
+ * token and mapping the resulting tokens back to strings.
697
+ *
698
+ * @param {string} str - The string to pass through the pipeline.
699
+ * @param {?object} metadata - Optional metadata to associate with the token
700
+ * passed to the pipeline.
701
+ * @returns {string[]}
702
+ */
703
+ lunr.Pipeline.prototype.runString = function (str, metadata) {
704
+ var token = new lunr.Token (str, metadata)
705
+
706
+ return this.run([token]).map(function (t) {
707
+ return t.toString()
708
+ })
709
+ }
710
+
711
+ /**
712
+ * Resets the pipeline by removing any existing processors.
713
+ *
714
+ */
715
+ lunr.Pipeline.prototype.reset = function () {
716
+ this._stack = []
717
+ }
718
+
719
+ /**
720
+ * Returns a representation of the pipeline ready for serialisation.
721
+ *
722
+ * Logs a warning if the function has not been registered.
723
+ *
724
+ * @returns {Array}
725
+ */
726
+ lunr.Pipeline.prototype.toJSON = function () {
727
+ return this._stack.map(function (fn) {
728
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
729
+
730
+ return fn.label
731
+ })
732
+ }
733
/*!
 * lunr.Vector
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A vector is used to construct the vector space of documents and queries. These
 * vectors support operations to determine the similarity between two documents or
 * a document and a query.
 *
 * Normally no parameters are required for initializing a vector, but in the case of
 * loading a previously dumped vector the raw elements can be provided to the constructor.
 *
 * For performance reasons vectors are implemented with a flat array, where an elements
 * index is immediately followed by its value. E.g. [index, value, index, value]. This
 * allows the underlying array to be as sparse as possible and still offer decent
 * performance when being used for vector calculations.
 *
 * @constructor
 * @param {Number[]} [elements] - The flat list of element index and element value pairs.
 */
lunr.Vector = function (elements) {
  // Magnitude is computed lazily; 0 marks it as not yet calculated.
  this._magnitude = 0
  this.elements = elements || []
}
758
+
759
+
760
/**
 * Calculates the position within the vector's flat element array at which
 * a given index should be inserted.
 *
 * This is used internally by insert and upsert. If there are duplicate indexes then
 * the position is returned as if the value for that index were to be updated, but it
 * is the callers responsibility to check whether there is a duplicate at that index.
 *
 * Implemented as a binary search over the even (index) slots of the flat
 * [index, value, index, value, ...] array, so returned positions are always
 * even offsets into `this.elements`.
 *
 * @param {Number} index - The element index to locate an insertion position for.
 * @returns {Number} an even offset into the flat elements array.
 */
lunr.Vector.prototype.positionForIndex = function (index) {
  // For an empty vector the tuple can be inserted at the beginning
  if (this.elements.length == 0) {
    return 0
  }

  // start/end are measured in pairs, so the pivot's array offset is
  // pivotPoint * 2 (the even, index-holding slot of the pair).
  var start = 0,
      end = this.elements.length / 2,
      sliceLength = end - start,
      pivotPoint = Math.floor(sliceLength / 2),
      pivotIndex = this.elements[pivotPoint * 2]

  // Standard binary search, narrowing [start, end) until one pair remains
  // or an exact match is found.
  while (sliceLength > 1) {
    if (pivotIndex < index) {
      start = pivotPoint
    }

    if (pivotIndex > index) {
      end = pivotPoint
    }

    if (pivotIndex == index) {
      break
    }

    sliceLength = end - start
    pivotPoint = start + Math.floor(sliceLength / 2)
    pivotIndex = this.elements[pivotPoint * 2]
  }

  // Exact match: position of the existing pair (caller decides update vs error).
  if (pivotIndex == index) {
    return pivotPoint * 2
  }

  // Pivot is larger: new pair belongs at the pivot's slot, shifting it right.
  if (pivotIndex > index) {
    return pivotPoint * 2
  }

  // Pivot is smaller: new pair belongs immediately after the pivot's pair.
  if (pivotIndex < index) {
    return (pivotPoint + 1) * 2
  }
}
812
+
813
/**
 * Inserts an element at an index within the vector.
 *
 * Duplicates are not allowed; an error is thrown if an entry already
 * exists for the index.
 *
 * @param {Number} insertIdx - The index at which to insert.
 * @param {Number} val - The value to insert.
 */
lunr.Vector.prototype.insert = function (insertIdx, val) {
  var rejectDuplicate = function () {
    throw "duplicate index"
  }

  this.upsert(insertIdx, val, rejectDuplicate)
}
827
+
828
/**
 * Inserts a value for an index, or resolves a conflict with an
 * existing value via the supplied function.
 *
 * @param {Number} insertIdx - The index at which to insert.
 * @param {Number} val - The value to insert.
 * @param {function} fn - Called on update with (existingValue, val);
 * its return value replaces the stored value.
 */
lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
  this._magnitude = 0 // any change invalidates the cached magnitude

  var position = this.positionForIndex(insertIdx)

  if (this.elements[position] != insertIdx) {
    // No entry for this index yet: splice a new [index, value] pair in.
    this.elements.splice(position, 0, insertIdx, val)
  } else {
    // Existing entry: let the caller-supplied fn decide the new value.
    this.elements[position + 1] = fn(this.elements[position + 1], val)
  }
}
846
+
847
/**
 * Calculates (and caches) the Euclidean magnitude of this vector.
 *
 * @returns {Number}
 */
lunr.Vector.prototype.magnitude = function () {
  // Serve the cached value when present (0 doubles as "not computed").
  if (this._magnitude) return this._magnitude

  var sumOfSquares = 0

  // Values occupy the odd slots of the flat [index, value, ...] array.
  for (var i = 1; i < this.elements.length; i += 2) {
    var value = this.elements[i]
    sumOfSquares += value * value
  }

  this._magnitude = Math.sqrt(sumOfSquares)
  return this._magnitude
}
865
+
866
/**
 * Calculates the dot product of this vector and another vector.
 *
 * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
 * @returns {Number}
 */
lunr.Vector.prototype.dot = function (otherVector) {
  var left = this.elements,
      right = otherVector.elements,
      leftLen = left.length,
      rightLen = right.length,
      product = 0

  var i = 0, j = 0

  // Merge-join over the two sorted sparse arrays: advance whichever
  // side has the smaller index, multiplying values only where the
  // indexes coincide.
  while (i < leftLen && j < rightLen) {
    var leftIdx = left[i],
        rightIdx = right[j]

    if (leftIdx < rightIdx) {
      i += 2
    } else if (leftIdx > rightIdx) {
      j += 2
    } else if (leftIdx == rightIdx) {
      product += left[i + 1] * right[j + 1]
      i += 2
      j += 2
    }
  }

  return product
}
894
+
895
/**
 * Calculates the similarity between this vector and another vector.
 *
 * @param {lunr.Vector} otherVector - The other vector to calculate the
 * similarity with.
 * @returns {Number}
 */
lunr.Vector.prototype.similarity = function (otherVector) {
  // The `|| 0` guards against a zero magnitude producing NaN.
  var score = this.dot(otherVector) / this.magnitude()
  return score || 0
}
905
+
906
/**
 * Extracts just the values from the vector, dropping the indexes.
 *
 * @returns {Number[]}
 */
lunr.Vector.prototype.toArray = function () {
  var values = new Array (this.elements.length / 2)

  var out = 0
  for (var i = 1; i < this.elements.length; i += 2) {
    values[out] = this.elements[i]
    out += 1
  }

  return values
}
920
+
921
/**
 * A JSON serializable representation of the vector.
 *
 * @returns {Number[]} the raw flat [index, value, ...] element array.
 */
lunr.Vector.prototype.toJSON = function () {
  // The flat element array is already plain JSON-safe numbers.
  return this.elements
}
929
/* eslint-disable */
/*!
 * lunr.stemmer
 * Copyright (C) 2019 Oliver Nightingale
 * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
 */

/**
 * lunr.stemmer is an english language stemmer, this is a JavaScript
 * implementation of the PorterStemmer taken from http://tartarus.org/~martin
 *
 * The stemmer reduces inflected words to a common stem (e.g.
 * "fishing", "fished", "fisher" all become "fish") so that different
 * forms of a word match each other at query time.
 *
 * @static
 * @implements {lunr.PipelineFunction}
 * @param {lunr.Token} token - The string to stem
 * @returns {lunr.Token}
 * @see {@link lunr.Pipeline}
 * @function
 */
lunr.stemmer = (function(){
  // Suffix -> replacement tables used by steps 2 and 3 of the Porter
  // algorithm.
  var step2list = {
      "ational" : "ate",
      "tional" : "tion",
      "enci" : "ence",
      "anci" : "ance",
      "izer" : "ize",
      "bli" : "ble",
      "alli" : "al",
      "entli" : "ent",
      "eli" : "e",
      "ousli" : "ous",
      "ization" : "ize",
      "ation" : "ate",
      "ator" : "ate",
      "alism" : "al",
      "iveness" : "ive",
      "fulness" : "ful",
      "ousness" : "ous",
      "aliti" : "al",
      "iviti" : "ive",
      "biliti" : "ble",
      "logi" : "log"
    },

    step3list = {
      "icate" : "ic",
      "ative" : "",
      "alize" : "al",
      "iciti" : "ic",
      "ical" : "ic",
      "ful" : "",
      "ness" : ""
    },

    // Regex fragments describing consonant/vowel structure; "m" below
    // refers to the Porter "measure" of a word (its VC sequence count).
    c = "[^aeiou]", // consonant
    v = "[aeiouy]", // vowel
    C = c + "[^aeiouy]*", // consonant sequence
    V = v + "[aeiou]*", // vowel sequence

    mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0
    meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1
    mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1
    s_v = "^(" + C + ")?" + v; // vowel in stem

  // Regexes are built once here rather than inside porterStemmer so the
  // hot path does no regex compilation.
  var re_mgr0 = new RegExp(mgr0);
  var re_mgr1 = new RegExp(mgr1);
  var re_meq1 = new RegExp(meq1);
  var re_s_v = new RegExp(s_v);

  var re_1a = /^(.+?)(ss|i)es$/;
  var re2_1a = /^(.+?)([^s])s$/;
  var re_1b = /^(.+?)eed$/;
  var re2_1b = /^(.+?)(ed|ing)$/;
  var re_1b_2 = /.$/;
  var re2_1b_2 = /(at|bl|iz)$/;
  var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
  var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");

  var re_1c = /^(.+?[^aeiou])y$/;
  var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;

  var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;

  var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
  var re2_4 = /^(.+?)(s|t)(ion)$/;

  var re_5 = /^(.+?)e$/;
  var re_5_1 = /ll$/;
  var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");

  // Applies the five Porter stemming steps in order to a single
  // lower-case word and returns the stemmed form.
  var porterStemmer = function porterStemmer(w) {
    var stem,
      suffix,
      firstch,
      re,
      re2,
      re3,
      re4;

    // Words shorter than 3 characters are returned unchanged.
    if (w.length < 3) { return w; }

    // Temporarily capitalise a leading "y" so it is treated as a
    // consonant by the vowel/consonant regexes; restored at the end.
    firstch = w.substr(0,1);
    if (firstch == "y") {
      w = firstch.toUpperCase() + w.substr(1);
    }

    // Step 1a: plurals (caresses -> caress, ponies -> poni, cats -> cat)
    re = re_1a
    re2 = re2_1a;

    if (re.test(w)) { w = w.replace(re,"$1$2"); }
    else if (re2.test(w)) { w = w.replace(re2,"$1$2"); }

    // Step 1b: past tense / gerund endings (-eed, -ed, -ing)
    re = re_1b;
    re2 = re2_1b;
    if (re.test(w)) {
      var fp = re.exec(w);
      re = re_mgr0;
      if (re.test(fp[1])) {
        re = re_1b_2;
        w = w.replace(re,"");
      }
    } else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1];
      re2 = re_s_v;
      if (re2.test(stem)) {
        w = stem;
        re2 = re2_1b_2;
        re3 = re3_1b_2;
        re4 = re4_1b_2;
        // Fix up the stem: re-add "e" (conflat(ed) -> conflate),
        // undo doubled consonants (hopp(ing) -> hop), or re-add "e"
        // for short CVC stems (fil(ing) -> file).
        if (re2.test(w)) { w = w + "e"; }
        else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); }
        else if (re4.test(w)) { w = w + "e"; }
      }
    }

    // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
    re = re_1c;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      w = stem + "i";
    }

    // Step 2: map double suffixes to single ones (-ization -> -ize)
    // when the stem has measure m > 0.
    re = re_2;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = re_mgr0;
      if (re.test(stem)) {
        w = stem + step2list[suffix];
      }
    }

    // Step 3: handle -ic-, -full, -ness etc., same m > 0 condition.
    re = re_3;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = re_mgr0;
      if (re.test(stem)) {
        w = stem + step3list[suffix];
      }
    }

    // Step 4: drop -ant, -ence etc. entirely when m > 1.
    re = re_4;
    re2 = re2_4;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = re_mgr1;
      if (re.test(stem)) {
        w = stem;
      }
    } else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1] + fp[2];
      re2 = re_mgr1;
      if (re2.test(stem)) {
        w = stem;
      }
    }

    // Step 5: remove a trailing -e when m > 1 (or m = 1 without a
    // short CVC ending), then reduce a final -ll when m > 1.
    re = re_5;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = re_mgr1;
      re2 = re_meq1;
      re3 = re3_5;
      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
        w = stem;
      }
    }

    re = re_5_1;
    re2 = re_mgr1;
    if (re.test(w) && re2.test(w)) {
      re = re_1b_2;
      w = w.replace(re,"");
    }

    // and turn initial Y back to y

    if (firstch == "y") {
      w = firstch.toLowerCase() + w.substr(1);
    }

    return w;
  };

  // The pipeline function: stems the token in place via token.update.
  return function (token) {
    return token.update(porterStemmer);
  }
})();

lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
1152
/*!
 * lunr.stopWordFilter
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * Builds a stop word filter pipeline function from the provided list
 * of stop words.
 *
 * The built in lunr.stopWordFilter is produced by this generator; it
 * can also be used to create custom stop word filters for specific
 * applications or for non English languages.
 *
 * @function
 * @param {Array} stopWords - The list of words to filter out.
 * @returns {lunr.PipelineFunction}
 * @see lunr.Pipeline
 * @see lunr.stopWordFilter
 */
lunr.generateStopWordFilter = function (stopWords) {
  // Build a lookup table mapping each stop word to itself so that
  // membership can be tested with a single property read.
  var lookup = stopWords.reduce(function (table, stopWord) {
    table[stopWord] = stopWord
    return table
  }, {})

  return function (token) {
    // Pass the token through unless it is a stop word; returning
    // undefined removes it from the pipeline.
    if (token && lookup[token.toString()] !== token.toString()) return token
  }
}
1180
+
1181
/**
 * lunr.stopWordFilter is an English language stop word list filter, any words
 * contained in the list will not be passed through the filter.
 *
 * This is intended to be used in the Pipeline. If the token does not pass the
 * filter then undefined will be returned.
 *
 * @function
 * @implements {lunr.PipelineFunction}
 * @params {lunr.Token} token - A token to check for being a stop word.
 * @returns {lunr.Token}
 * @see {@link lunr.Pipeline}
 */
// Common English words that carry little search value; tokens matching
// any of these are dropped from the index and from queries.
lunr.stopWordFilter = lunr.generateStopWordFilter([
  'a',
  'able',
  'about',
  'across',
  'after',
  'all',
  'almost',
  'also',
  'am',
  'among',
  'an',
  'and',
  'any',
  'are',
  'as',
  'at',
  'be',
  'because',
  'been',
  'but',
  'by',
  'can',
  'cannot',
  'could',
  'dear',
  'did',
  'do',
  'does',
  'either',
  'else',
  'ever',
  'every',
  'for',
  'from',
  'get',
  'got',
  'had',
  'has',
  'have',
  'he',
  'her',
  'hers',
  'him',
  'his',
  'how',
  'however',
  'i',
  'if',
  'in',
  'into',
  'is',
  'it',
  'its',
  'just',
  'least',
  'let',
  'like',
  'likely',
  'may',
  'me',
  'might',
  'most',
  'must',
  'my',
  'neither',
  'no',
  'nor',
  'not',
  'of',
  'off',
  'often',
  'on',
  'only',
  'or',
  'other',
  'our',
  'own',
  'rather',
  'said',
  'say',
  'says',
  'she',
  'should',
  'since',
  'so',
  'some',
  'than',
  'that',
  'the',
  'their',
  'them',
  'then',
  'there',
  'these',
  'they',
  'this',
  'tis',
  'to',
  'too',
  'twas',
  'us',
  'wants',
  'was',
  'we',
  'were',
  'what',
  'when',
  'where',
  'which',
  'while',
  'who',
  'whom',
  'why',
  'will',
  'with',
  'would',
  'yet',
  'you',
  'your'
])

lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
1317
/*!
 * lunr.trimmer
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A pipeline function that strips non word characters from the start
 * and end of tokens before they enter the index.
 *
 * This implementation may not work correctly for non latin characters
 * and should either be removed or adapted for use with languages with
 * non-latin characters.
 *
 * @static
 * @implements {lunr.PipelineFunction}
 * @param {lunr.Token} token The token to pass through the filter
 * @returns {lunr.Token}
 * @see lunr.Pipeline
 */
lunr.trimmer = function (token) {
  var stripEdges = function (s) {
    return s.replace(/^\W+/, '').replace(/\W+$/, '')
  }

  return token.update(stripEdges)
}

lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
1344
/*!
 * lunr.TokenSet
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * A token set stores the unique list of all tokens within an index,
 * implemented as a minimal finite state automaton where common
 * prefixes and suffixes are shared between tokens to save space.
 *
 * Token sets also represent incoming queries: the query token set is
 * intersected with the index token set to decide which tokens to look
 * up in the inverted index. A set may hold many tokens (the index) or
 * a single one (a simple query clause).
 *
 * Wildcard matching — leading, contained and trailing — is supported,
 * and edit distance matching is built on top of it.
 *
 * @constructor
 */
lunr.TokenSet = function () {
  this.final = false
  this.edges = {}

  // Take the next auto-increment id; minimisation relies on every
  // node having a unique identifier.
  this.id = lunr.TokenSet._nextId
  lunr.TokenSet._nextId += 1
}

/**
 * The next auto increment identifier to hand out to a new TokenSet.
 *
 * TokenSets require a unique identifier to be correctly minimised.
 *
 * @private
 */
lunr.TokenSet._nextId = 1
1386
+
1387
/**
 * Creates a TokenSet instance from the given sorted array of words.
 *
 * @param {String[]} arr - A sorted array of strings to create the set from.
 * @returns {lunr.TokenSet}
 * @throws Will throw an error if the input array is not sorted.
 */
lunr.TokenSet.fromArray = function (arr) {
  var builder = new lunr.TokenSet.Builder

  for (var i = 0; i < arr.length; i++) {
    builder.insert(arr[i])
  }

  // finish() minimises any remaining unchecked nodes.
  builder.finish()
  return builder.root
}
1404
+
1405
/**
 * Creates a token set from a query clause.
 *
 * @private
 * @param {Object} clause - A single clause from lunr.Query.
 * @param {string} clause.term - The query clause term.
 * @param {number} [clause.editDistance] - The optional edit distance for the term.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromClause = function (clause) {
  // A clause carrying an edit distance needs the fuzzy automaton;
  // otherwise a plain (possibly wildcarded) string set suffices.
  if ('editDistance' in clause) {
    return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
  }

  return lunr.TokenSet.fromString(clause.term)
}
1421
+
1422
/**
 * Creates a token set representing a single string with a specified
 * edit distance.
 *
 * Insertions, deletions, substitutions and transpositions are each
 * treated as an edit distance of 1.
 *
 * Increasing the allowed edit distance will have a dramatic impact
 * on the performance of both creating and intersecting these TokenSets.
 * It is advised to keep the edit distance less than 3.
 *
 * @param {string} str - The string to create the token set from.
 * @param {number} editDistance - The allowed edit distance to match.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
  var root = new lunr.TokenSet

  // Explicit work stack instead of recursion: each frame tracks the
  // automaton node being extended, the edits still available, and the
  // remainder of the string to consume.
  var stack = [{
    node: root,
    editsRemaining: editDistance,
    str: str
  }]

  while (stack.length) {
    var frame = stack.pop()

    // no edit
    if (frame.str.length > 0) {
      var char = frame.str.charAt(0),
          noEditNode

      // Reuse an existing edge for this character if one exists,
      // keeping the automaton minimal.
      if (char in frame.node.edges) {
        noEditNode = frame.node.edges[char]
      } else {
        noEditNode = new lunr.TokenSet
        frame.node.edges[char] = noEditNode
      }

      if (frame.str.length == 1) {
        noEditNode.final = true
      }

      stack.push({
        node: noEditNode,
        editsRemaining: frame.editsRemaining,
        str: frame.str.slice(1)
      })
    }

    // Out of edits: only the exact-match path above may continue.
    if (frame.editsRemaining == 0) {
      continue
    }

    // insertion
    // "*" edges match any single character; insertion and
    // substitution share the same wildcard edge on this node.
    if ("*" in frame.node.edges) {
      var insertionNode = frame.node.edges["*"]
    } else {
      var insertionNode = new lunr.TokenSet
      frame.node.edges["*"] = insertionNode
    }

    if (frame.str.length == 0) {
      insertionNode.final = true
    }

    stack.push({
      node: insertionNode,
      editsRemaining: frame.editsRemaining - 1,
      str: frame.str
    })

    // deletion
    // can only do a deletion if we have enough edits remaining
    // and if there are characters left to delete in the string
    if (frame.str.length > 1) {
      stack.push({
        node: frame.node,
        editsRemaining: frame.editsRemaining - 1,
        str: frame.str.slice(1)
      })
    }

    // deletion
    // just removing the last character from the str
    if (frame.str.length == 1) {
      frame.node.final = true
    }

    // substitution
    // can only do a substitution if we have enough edits remaining
    // and if there are characters left to substitute
    if (frame.str.length >= 1) {
      if ("*" in frame.node.edges) {
        var substitutionNode = frame.node.edges["*"]
      } else {
        var substitutionNode = new lunr.TokenSet
        frame.node.edges["*"] = substitutionNode
      }

      if (frame.str.length == 1) {
        substitutionNode.final = true
      }

      stack.push({
        node: substitutionNode,
        editsRemaining: frame.editsRemaining - 1,
        str: frame.str.slice(1)
      })
    }

    // transposition
    // can only do a transposition if there are edits remaining
    // and there are enough characters to transpose
    if (frame.str.length > 1) {
      var charA = frame.str.charAt(0),
          charB = frame.str.charAt(1),
          transposeNode

      // Follow (or create) the edge for the SECOND character first,
      // then continue with the first character prepended — i.e. the
      // two characters swapped.
      if (charB in frame.node.edges) {
        transposeNode = frame.node.edges[charB]
      } else {
        transposeNode = new lunr.TokenSet
        frame.node.edges[charB] = transposeNode
      }

      if (frame.str.length == 1) {
        transposeNode.final = true
      }

      stack.push({
        node: transposeNode,
        editsRemaining: frame.editsRemaining - 1,
        str: charA + frame.str.slice(2)
      })
    }
  }

  return root
}
1562
+
1563
/**
 * Creates a TokenSet from a string.
 *
 * The string may contain one or more wildcard characters (*)
 * that will allow wildcard matching when intersecting with
 * another TokenSet.
 *
 * @param {string} str - The string to create a TokenSet from.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromString = function (str) {
  var root = new lunr.TokenSet,
      node = root

  /*
   * Walk the string appending a node per character. A wildcard
   * character gets a self referencing edge so that it continually
   * matches any number of any characters.
   */
  for (var i = 0; i < str.length; i++) {
    var char = str[i],
        isLast = (i == str.length - 1)

    if (char == "*") {
      node.edges[char] = node
      node.final = isLast
    } else {
      var child = new lunr.TokenSet
      child.final = isLast

      node.edges[char] = child
      node = child
    }
  }

  return root
}
1604
+
1605
/**
 * Converts this TokenSet into an array of the strings contained
 * within the TokenSet, via a depth-first walk of the automaton.
 *
 * @returns {string[]}
 */
lunr.TokenSet.prototype.toArray = function () {
  var words = []

  var stack = [{
    prefix: "",
    node: this
  }]

  while (stack.length) {
    var frame = stack.pop()
    var labels = Object.keys(frame.node.edges)

    if (frame.node.final) {
      /* In Safari, at this point the prefix is sometimes corrupted, see:
       * https://github.com/olivernn/lunr.js/issues/279 Calling any
       * String.prototype method forces Safari to "cast" this string to what
       * it's supposed to be, fixing the bug. */
      frame.prefix.charAt(0)
      words.push(frame.prefix)
    }

    for (var i = 0; i < labels.length; i++) {
      var label = labels[i]

      stack.push({
        prefix: frame.prefix.concat(label),
        node: frame.node.edges[label]
      })
    }
  }

  return words
}
1645
+
1646
/**
 * Generates a string representation of a TokenSet.
 *
 * This key — the finality bit followed by each outgoing edge label
 * and target node id — exists so TokenSets can be used as object keys
 * during construction and minimisation; it is not meant to be human
 * friendly.
 *
 * @returns {string}
 */
lunr.TokenSet.prototype.toString = function () {
  // A minimised node caches its key in _str (it can never change
  // once minimised), so serve that when present.
  if (this._str) {
    return this._str
  }

  // NOTE: Object.keys plus an index loop is deliberate: this.edges
  // will likely be in 'hash-mode' with many keys, and a for-in loop
  // de-optimises this function in V8.
  var labels = Object.keys(this.edges).sort()
  var str = this.final ? '1' : '0'

  for (var i = 0; i < labels.length; i++) {
    var label = labels[i]

    str = str + label + this.edges[label].id
  }

  return str
}
1682
+
1683
/**
 * Returns a new TokenSet that is the intersection of this TokenSet
 * and the passed TokenSet, honouring any wildcards contained within
 * either set.
 *
 * @param {lunr.TokenSet} b - An other TokenSet to intersect with.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.prototype.intersect = function (b) {
  var output = new lunr.TokenSet

  var stack = [{
    qNode: b,
    output: output,
    node: this
  }]

  while (stack.length) {
    var frame = stack.pop()

    // NOTE: As in #toString, Object.keys + index loops are used
    // instead of for-in because both edge objects are likely in
    // 'hash' mode, which would de-optimise this function in V8.
    var qEdges = Object.keys(frame.qNode.edges)
    var nEdges = Object.keys(frame.node.edges)

    for (var q = 0; q < qEdges.length; q++) {
      var qEdge = qEdges[q]

      for (var n = 0; n < nEdges.length; n++) {
        var nEdge = nEdges[n]

        // Only matching labels (or a query-side wildcard) survive
        // the intersection.
        if (nEdge != qEdge && qEdge != '*') {
          continue
        }

        var node = frame.node.edges[nEdge],
            qNode = frame.qNode.edges[qEdge],
            final = node.final && qNode.final,
            next = undefined

        if (nEdge in frame.output.edges) {
          // An edge for this character already exists in the output;
          // just merge the finality bit.
          next = frame.output.edges[nEdge]
          next.final = next.final || final
        } else {
          // No edge yet: create one, set its finality and attach it.
          next = new lunr.TokenSet
          next.final = final
          frame.output.edges[nEdge] = next
        }

        stack.push({
          qNode: qNode,
          output: next,
          node: node
        })
      }
    }
  }

  return output
}
1755
// Incrementally builds a minimal TokenSet automaton from words that
// MUST be inserted in sorted order (see insert).
lunr.TokenSet.Builder = function () {
  this.previousWord = ""          // last word inserted, for order checks
  this.root = new lunr.TokenSet   // root of the automaton being built
  this.uncheckedNodes = []        // suffix nodes not yet minimised
  this.minimizedNodes = {}        // canonical node per structural key
}
1761
+
1762
// Inserts a word into the automaton being built. Words must arrive in
// sorted order so that the shared prefix with the previous word can be
// kept and everything beyond it minimised.
lunr.TokenSet.Builder.prototype.insert = function (word) {
  var node

  if (word < this.previousWord) {
    throw new Error ("Out of order word insertion")
  }

  // Length of the prefix this word shares with the previous word.
  var commonPrefix = 0
  while (commonPrefix < word.length &&
         commonPrefix < this.previousWord.length &&
         word[commonPrefix] == this.previousWord[commonPrefix]) {
    commonPrefix++
  }

  // Everything past the shared prefix of the previous word can no
  // longer change, so minimise it now.
  this.minimize(commonPrefix)

  // Continue building from the deepest surviving unchecked node, or
  // from the root when nothing is pending.
  if (this.uncheckedNodes.length == 0) {
    node = this.root
  } else {
    node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
  }

  // Append a fresh node per remaining character, remembering each new
  // edge so it can be minimised later.
  for (var i = commonPrefix; i < word.length; i++) {
    var nextNode = new lunr.TokenSet
    var char = word[i]

    node.edges[char] = nextNode

    this.uncheckedNodes.push({
      parent: node,
      char: char,
      child: nextNode
    })

    node = nextNode
  }

  node.final = true
  this.previousWord = word
}
1801
+
1802
// Finalises the automaton by minimising every remaining unchecked
// node (downTo 0 = the whole pending suffix).
lunr.TokenSet.Builder.prototype.finish = function () {
  this.minimize(0)
}
1805
+
1806
// Minimises pending suffix nodes from the deepest up to (but not
// including) depth downTo, replacing structurally identical nodes
// with a single shared canonical node.
lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
  for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
    var entry = this.uncheckedNodes[i]
    var childKey = entry.child.toString()

    if (childKey in this.minimizedNodes) {
      // An equivalent node already exists: point the parent at it.
      entry.parent.edges[entry.char] = this.minimizedNodes[childKey]
    } else {
      // First node with this shape. Cache its key (it can no longer
      // change) and record it as the canonical node.
      entry.child._str = childKey

      this.minimizedNodes[childKey] = entry.child
    }

    this.uncheckedNodes.pop()
  }
}
1824
/*!
 * lunr.Index
 * Copyright (C) 2019 Oliver Nightingale
 */

/**
 * An index holds the built index of all documents and provides a
 * query interface over it.
 *
 * Instances are not normally created with this constructor directly:
 * use lunr.Builder to construct new indexes, or lunr.Index.load to
 * restore a previously built and serialized index.
 *
 * @constructor
 * @param {Object} attrs - The attributes of the built search index.
 * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
 * @param {Object<string, lunr.Vector>} attrs.fieldVectors - Field vectors
 * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens.
 * @param {string[]} attrs.fields - The names of indexed document fields.
 * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
 */
lunr.Index = function (attrs) {
  this.fields = attrs.fields
  this.pipeline = attrs.pipeline
  this.invertedIndex = attrs.invertedIndex
  this.fieldVectors = attrs.fieldVectors
  this.tokenSet = attrs.tokenSet
}
1852
+
1853
+ /**
1854
+ * A result contains details of a document matching a search query.
1855
+ * @typedef {Object} lunr.Index~Result
1856
+ * @property {string} ref - The reference of the document this result represents.
1857
+ * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.
1858
+ * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
1859
+ */
1860
+
1861
+ /**
1862
+ * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
1863
+ * query language which itself is parsed into an instance of lunr.Query.
1864
+ *
1865
+ * For programmatically building queries it is advised to directly use lunr.Query, the query language
1866
+ * is best used for human entered text rather than program generated text.
1867
+ *
1868
+ * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported
1869
+ * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello'
1870
+ * or 'world', though those that contain both will rank higher in the results.
1871
+ *
1872
+ * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
1873
+ * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
1874
+ * wildcards will increase the number of documents that will be found but can also have a negative
1875
+ * impact on query performance, especially with wildcards at the beginning of a term.
1876
+ *
1877
+ * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
1878
+ * hello in the title field will match this query. Using a field not present in the index will lead
1879
+ * to an error being thrown.
1880
+ *
1881
+ * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term
1882
+ * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
1883
+ * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2.
1884
+ * Avoid large values for edit distance to improve query performance.
1885
+ *
1886
+ * Each term also supports a presence modifier. By default a term's presence in document is optional, however
1887
+ * this can be changed to either required or prohibited. For a term's presence to be required in a document the
1888
+ * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and
1889
+ * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not
1890
+ * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'.
1891
+ *
1892
+ * To escape special characters the backslash character '\' can be used, this allows searches to include
1893
+ * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
1894
+ * of attempting to apply a boost of 2 to the search term "foo".
1895
+ *
1896
+ * @typedef {string} lunr.Index~QueryString
1897
+ * @example <caption>Simple single term query</caption>
1898
+ * hello
1899
+ * @example <caption>Multiple term query</caption>
1900
+ * hello world
1901
+ * @example <caption>term scoped to a field</caption>
1902
+ * title:hello
1903
+ * @example <caption>term with a boost of 10</caption>
1904
+ * hello^10
1905
+ * @example <caption>term with an edit distance of 2</caption>
1906
+ * hello~2
1907
+ * @example <caption>terms with presence modifiers</caption>
1908
+ * -foo +bar baz
1909
+ */
1910
+
1911
+ /**
1912
+ * Performs a search against the index using lunr query syntax.
1913
+ *
1914
+ * Results will be returned sorted by their score, the most relevant results
1915
+ * will be returned first. For details on how the score is calculated, please see
1916
+ * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}.
1917
+ *
1918
+ * For more programmatic querying use lunr.Index#query.
1919
+ *
1920
+ * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
1921
+ * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
1922
+ * @returns {lunr.Index~Result[]}
1923
+ */
1924
+ lunr.Index.prototype.search = function (queryString) {
1925
+ return this.query(function (query) {
1926
+ var parser = new lunr.QueryParser(queryString, query)
1927
+ parser.parse()
1928
+ })
1929
+ }
1930
+
1931
+ /**
1932
+ * A query builder callback provides a query object to be used to express
1933
+ * the query to perform on the index.
1934
+ *
1935
+ * @callback lunr.Index~queryBuilder
1936
+ * @param {lunr.Query} query - The query object to build up.
1937
+ * @this lunr.Query
1938
+ */
1939
+
1940
+ /**
1941
+ * Performs a query against the index using the yielded lunr.Query object.
1942
+ *
1943
+ * If performing programmatic queries against the index, this method is preferred
1944
+ * over lunr.Index#search so as to avoid the additional query parsing overhead.
1945
+ *
1946
+ * A query object is yielded to the supplied function which should be used to
1947
+ * express the query to be run against the index.
1948
+ *
1949
+ * Note that although this function takes a callback parameter it is _not_ an
1950
+ * asynchronous operation, the callback is just yielded a query object to be
1951
+ * customized.
1952
+ *
1953
+ * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
1954
+ * @returns {lunr.Index~Result[]}
1955
+ */
1956
+ lunr.Index.prototype.query = function (fn) {
1957
+ // for each query clause
1958
+ // * process terms
1959
+ // * expand terms from token set
1960
+ // * find matching documents and metadata
1961
+ // * get document vectors
1962
+ // * score documents
1963
+
1964
+ var query = new lunr.Query(this.fields),
1965
+ matchingFields = Object.create(null),
1966
+ queryVectors = Object.create(null),
1967
+ termFieldCache = Object.create(null),
1968
+ requiredMatches = Object.create(null),
1969
+ prohibitedMatches = Object.create(null)
1970
+
1971
+ /*
1972
+ * To support field level boosts a query vector is created per
1973
+ * field. An empty vector is eagerly created to support negated
1974
+ * queries.
1975
+ */
1976
+ for (var i = 0; i < this.fields.length; i++) {
1977
+ queryVectors[this.fields[i]] = new lunr.Vector
1978
+ }
1979
+
1980
+ fn.call(query, query)
1981
+
1982
+ for (var i = 0; i < query.clauses.length; i++) {
1983
+ /*
1984
+ * Unless the pipeline has been disabled for this term, which is
1985
+ * the case for terms with wildcards, we need to pass the clause
1986
+ * term through the search pipeline. A pipeline returns an array
1987
+ * of processed terms. Pipeline functions may expand the passed
1988
+ * term, which means we may end up performing multiple index lookups
1989
+ * for a single query term.
1990
+ */
1991
+ var clause = query.clauses[i],
1992
+ terms = null,
1993
+ clauseMatches = lunr.Set.complete
1994
+
1995
+ if (clause.usePipeline) {
1996
+ terms = this.pipeline.runString(clause.term, {
1997
+ fields: clause.fields
1998
+ })
1999
+ } else {
2000
+ terms = [clause.term]
2001
+ }
2002
+
2003
+ for (var m = 0; m < terms.length; m++) {
2004
+ var term = terms[m]
2005
+
2006
+ /*
2007
+ * Each term returned from the pipeline needs to use the same query
2008
+ * clause object, e.g. the same boost and or edit distance. The
2009
+ * simplest way to do this is to re-use the clause object but mutate
2010
+ * its term property.
2011
+ */
2012
+ clause.term = term
2013
+
2014
+ /*
2015
+ * From the term in the clause we create a token set which will then
2016
+ * be used to intersect the indexes token set to get a list of terms
2017
+ * to lookup in the inverted index
2018
+ */
2019
+ var termTokenSet = lunr.TokenSet.fromClause(clause),
2020
+ expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()
2021
+
2022
+ /*
2023
+ * If a term marked as required does not exist in the tokenSet it is
2024
+ * impossible for the search to return any matches. We set all the field
2025
+ * scoped required matches set to empty and stop examining any further
2026
+ * clauses.
2027
+ */
2028
+ if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) {
2029
+ for (var k = 0; k < clause.fields.length; k++) {
2030
+ var field = clause.fields[k]
2031
+ requiredMatches[field] = lunr.Set.empty
2032
+ }
2033
+
2034
+ break
2035
+ }
2036
+
2037
+ for (var j = 0; j < expandedTerms.length; j++) {
2038
+ /*
2039
+ * For each term get the posting and termIndex, this is required for
2040
+ * building the query vector.
2041
+ */
2042
+ var expandedTerm = expandedTerms[j],
2043
+ posting = this.invertedIndex[expandedTerm],
2044
+ termIndex = posting._index
2045
+
2046
+ for (var k = 0; k < clause.fields.length; k++) {
2047
+ /*
2048
+ * For each field that this query term is scoped by (by default
2049
+ * all fields are in scope) we need to get all the document refs
2050
+ * that have this term in that field.
2051
+ *
2052
+ * The posting is the entry in the invertedIndex for the matching
2053
+ * term from above.
2054
+ */
2055
+ var field = clause.fields[k],
2056
+ fieldPosting = posting[field],
2057
+ matchingDocumentRefs = Object.keys(fieldPosting),
2058
+ termField = expandedTerm + "/" + field,
2059
+ matchingDocumentsSet = new lunr.Set(matchingDocumentRefs)
2060
+
2061
+ /*
2062
+ * if the presence of this term is required ensure that the matching
2063
+ * documents are added to the set of required matches for this clause.
2064
+ *
2065
+ */
2066
+ if (clause.presence == lunr.Query.presence.REQUIRED) {
2067
+ clauseMatches = clauseMatches.union(matchingDocumentsSet)
2068
+
2069
+ if (requiredMatches[field] === undefined) {
2070
+ requiredMatches[field] = lunr.Set.complete
2071
+ }
2072
+ }
2073
+
2074
+ /*
2075
+ * if the presence of this term is prohibited ensure that the matching
2076
+ * documents are added to the set of prohibited matches for this field,
2077
+ * creating that set if it does not yet exist.
2078
+ */
2079
+ if (clause.presence == lunr.Query.presence.PROHIBITED) {
2080
+ if (prohibitedMatches[field] === undefined) {
2081
+ prohibitedMatches[field] = lunr.Set.empty
2082
+ }
2083
+
2084
+ prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet)
2085
+
2086
+ /*
2087
+ * Prohibited matches should not be part of the query vector used for
2088
+ * similarity scoring and no metadata should be extracted so we continue
2089
+ * to the next field
2090
+ */
2091
+ continue
2092
+ }
2093
+
2094
+ /*
2095
+ * The query field vector is populated using the termIndex found for
2096
+ * the term and a unit value with the appropriate boost applied.
2097
+ * Using upsert because there could already be an entry in the vector
2098
+ * for the term we are working with. In that case we just add the scores
2099
+ * together.
2100
+ */
2101
+ queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b })
2102
+
2103
+ /**
2104
+ * If we've already seen this term, field combo then we've already collected
2105
+ * the matching documents and metadata, no need to go through all that again
2106
+ */
2107
+ if (termFieldCache[termField]) {
2108
+ continue
2109
+ }
2110
+
2111
+ for (var l = 0; l < matchingDocumentRefs.length; l++) {
2112
+ /*
2113
+ * All metadata for this term/field/document triple
2114
+ * are then extracted and collected into an instance
2115
+ * of lunr.MatchData ready to be returned in the query
2116
+ * results
2117
+ */
2118
+ var matchingDocumentRef = matchingDocumentRefs[l],
2119
+ matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),
2120
+ metadata = fieldPosting[matchingDocumentRef],
2121
+ fieldMatch
2122
+
2123
+ if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) {
2124
+ matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata)
2125
+ } else {
2126
+ fieldMatch.add(expandedTerm, field, metadata)
2127
+ }
2128
+
2129
+ }
2130
+
2131
+ termFieldCache[termField] = true
2132
+ }
2133
+ }
2134
+ }
2135
+
2136
+ /**
2137
+ * If the presence was required we need to update the requiredMatches field sets.
2138
+ * We do this after all fields for the term have collected their matches because
2139
+ * the clause terms presence is required in _any_ of the fields not _all_ of the
2140
+ * fields.
2141
+ */
2142
+ if (clause.presence === lunr.Query.presence.REQUIRED) {
2143
+ for (var k = 0; k < clause.fields.length; k++) {
2144
+ var field = clause.fields[k]
2145
+ requiredMatches[field] = requiredMatches[field].intersect(clauseMatches)
2146
+ }
2147
+ }
2148
+ }
2149
+
2150
+ /**
2151
+ * Need to combine the field scoped required and prohibited
2152
+ * matching documents into a global set of required and prohibited
2153
+ * matches
2154
+ */
2155
+ var allRequiredMatches = lunr.Set.complete,
2156
+ allProhibitedMatches = lunr.Set.empty
2157
+
2158
+ for (var i = 0; i < this.fields.length; i++) {
2159
+ var field = this.fields[i]
2160
+
2161
+ if (requiredMatches[field]) {
2162
+ allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field])
2163
+ }
2164
+
2165
+ if (prohibitedMatches[field]) {
2166
+ allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field])
2167
+ }
2168
+ }
2169
+
2170
+ var matchingFieldRefs = Object.keys(matchingFields),
2171
+ results = [],
2172
+ matches = Object.create(null)
2173
+
2174
+ /*
2175
+ * If the query is negated (contains only prohibited terms)
2176
+ * we need to get _all_ fieldRefs currently existing in the
2177
+ * index. This is only done when we know that the query is
2178
+ * entirely prohibited terms to avoid any cost of getting all
2179
+ * fieldRefs unnecessarily.
2180
+ *
2181
+ * Additionally, blank MatchData must be created to correctly
2182
+ * populate the results.
2183
+ */
2184
+ if (query.isNegated()) {
2185
+ matchingFieldRefs = Object.keys(this.fieldVectors)
2186
+
2187
+ for (var i = 0; i < matchingFieldRefs.length; i++) {
2188
+ var matchingFieldRef = matchingFieldRefs[i]
2189
+ var fieldRef = lunr.FieldRef.fromString(matchingFieldRef)
2190
+ matchingFields[matchingFieldRef] = new lunr.MatchData
2191
+ }
2192
+ }
2193
+
2194
+ for (var i = 0; i < matchingFieldRefs.length; i++) {
2195
+ /*
2196
+ * Currently we have document fields that match the query, but we
2197
+ * need to return documents. The matchData and scores are combined
2198
+ * from multiple fields belonging to the same document.
2199
+ *
2200
+ * Scores are calculated by field, using the query vectors created
2201
+ * above, and combined into a final document score using addition.
2202
+ */
2203
+ var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
2204
+ docRef = fieldRef.docRef
2205
+
2206
+ if (!allRequiredMatches.contains(docRef)) {
2207
+ continue
2208
+ }
2209
+
2210
+ if (allProhibitedMatches.contains(docRef)) {
2211
+ continue
2212
+ }
2213
+
2214
+ var fieldVector = this.fieldVectors[fieldRef],
2215
+ score = queryVectors[fieldRef.fieldName].similarity(fieldVector),
2216
+ docMatch
2217
+
2218
+ if ((docMatch = matches[docRef]) !== undefined) {
2219
+ docMatch.score += score
2220
+ docMatch.matchData.combine(matchingFields[fieldRef])
2221
+ } else {
2222
+ var match = {
2223
+ ref: docRef,
2224
+ score: score,
2225
+ matchData: matchingFields[fieldRef]
2226
+ }
2227
+ matches[docRef] = match
2228
+ results.push(match)
2229
+ }
2230
+ }
2231
+
2232
+ /*
2233
+ * Sort the results objects by score, highest first.
2234
+ */
2235
+ return results.sort(function (a, b) {
2236
+ return b.score - a.score
2237
+ })
2238
+ }
2239
+
2240
+ /**
2241
+ * Prepares the index for JSON serialization.
2242
+ *
2243
+ * The schema for this JSON blob will be described in a
2244
+ * separate JSON schema file.
2245
+ *
2246
+ * @returns {Object}
2247
+ */
2248
+ lunr.Index.prototype.toJSON = function () {
2249
+ var invertedIndex = Object.keys(this.invertedIndex)
2250
+ .sort()
2251
+ .map(function (term) {
2252
+ return [term, this.invertedIndex[term]]
2253
+ }, this)
2254
+
2255
+ var fieldVectors = Object.keys(this.fieldVectors)
2256
+ .map(function (ref) {
2257
+ return [ref, this.fieldVectors[ref].toJSON()]
2258
+ }, this)
2259
+
2260
+ return {
2261
+ version: lunr.version,
2262
+ fields: this.fields,
2263
+ fieldVectors: fieldVectors,
2264
+ invertedIndex: invertedIndex,
2265
+ pipeline: this.pipeline.toJSON()
2266
+ }
2267
+ }
2268
+
2269
+ /**
2270
+ * Loads a previously serialized lunr.Index
2271
+ *
2272
+ * @param {Object} serializedIndex - A previously serialized lunr.Index
2273
+ * @returns {lunr.Index}
2274
+ */
2275
+ lunr.Index.load = function (serializedIndex) {
2276
+ var attrs = {},
2277
+ fieldVectors = {},
2278
+ serializedVectors = serializedIndex.fieldVectors,
2279
+ invertedIndex = Object.create(null),
2280
+ serializedInvertedIndex = serializedIndex.invertedIndex,
2281
+ tokenSetBuilder = new lunr.TokenSet.Builder,
2282
+ pipeline = lunr.Pipeline.load(serializedIndex.pipeline)
2283
+
2284
+ if (serializedIndex.version != lunr.version) {
2285
+ lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
2286
+ }
2287
+
2288
+ for (var i = 0; i < serializedVectors.length; i++) {
2289
+ var tuple = serializedVectors[i],
2290
+ ref = tuple[0],
2291
+ elements = tuple[1]
2292
+
2293
+ fieldVectors[ref] = new lunr.Vector(elements)
2294
+ }
2295
+
2296
+ for (var i = 0; i < serializedInvertedIndex.length; i++) {
2297
+ var tuple = serializedInvertedIndex[i],
2298
+ term = tuple[0],
2299
+ posting = tuple[1]
2300
+
2301
+ tokenSetBuilder.insert(term)
2302
+ invertedIndex[term] = posting
2303
+ }
2304
+
2305
+ tokenSetBuilder.finish()
2306
+
2307
+ attrs.fields = serializedIndex.fields
2308
+
2309
+ attrs.fieldVectors = fieldVectors
2310
+ attrs.invertedIndex = invertedIndex
2311
+ attrs.tokenSet = tokenSetBuilder.root
2312
+ attrs.pipeline = pipeline
2313
+
2314
+ return new lunr.Index(attrs)
2315
+ }
2316
+ /*!
2317
+ * lunr.Builder
2318
+ * Copyright (C) 2019 Oliver Nightingale
2319
+ */
2320
+
2321
+ /**
2322
+ * lunr.Builder performs indexing on a set of documents and
2323
+ * returns instances of lunr.Index ready for querying.
2324
+ *
2325
+ * All configuration of the index is done via the builder, the
2326
+ * fields to index, the document reference, the text processing
2327
+ * pipeline and document scoring parameters are all set on the
2328
+ * builder before indexing.
2329
+ *
2330
+ * @constructor
2331
+ * @property {string} _ref - Internal reference to the document reference field.
2332
+ * @property {string[]} _fields - Internal reference to the document fields to index.
2333
+ * @property {object} invertedIndex - The inverted index maps terms to document fields.
2334
+ * @property {object} documentTermFrequencies - Keeps track of document term frequencies.
2335
+ * @property {object} documentLengths - Keeps track of the length of documents added to the index.
2336
+ * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
2337
+ * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
2338
+ * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
2339
+ * @property {number} documentCount - Keeps track of the total number of documents indexed.
2340
+ * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75.
2341
+ * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.
2342
+ * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space.
2343
+ * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
2344
+ */
2345
+ lunr.Builder = function () {
2346
+ this._ref = "id"
2347
+ this._fields = Object.create(null)
2348
+ this._documents = Object.create(null)
2349
+ this.invertedIndex = Object.create(null)
2350
+ this.fieldTermFrequencies = {}
2351
+ this.fieldLengths = {}
2352
+ this.tokenizer = lunr.tokenizer
2353
+ this.pipeline = new lunr.Pipeline
2354
+ this.searchPipeline = new lunr.Pipeline
2355
+ this.documentCount = 0
2356
+ this._b = 0.75
2357
+ this._k1 = 1.2
2358
+ this.termIndex = 0
2359
+ this.metadataWhitelist = []
2360
+ }
2361
+
2362
+ /**
2363
+ * Sets the document field used as the document reference. Every document must have this field.
2364
+ * The type of this field in the document should be a string, if it is not a string it will be
2365
+ * coerced into a string by calling toString.
2366
+ *
2367
+ * The default ref is 'id'.
2368
+ *
2369
+ * The ref should _not_ be changed during indexing, it should be set before any documents are
2370
+ * added to the index. Changing it during indexing can lead to inconsistent results.
2371
+ *
2372
+ * @param {string} ref - The name of the reference field in the document.
2373
+ */
2374
+ lunr.Builder.prototype.ref = function (ref) {
2375
+ this._ref = ref
2376
+ }
2377
+
2378
+ /**
2379
+ * A function that is used to extract a field from a document.
2380
+ *
2381
+ * Lunr expects a field to be at the top level of a document, if however the field
2382
+ * is deeply nested within a document an extractor function can be used to extract
2383
+ * the right field for indexing.
2384
+ *
2385
+ * @callback fieldExtractor
2386
+ * @param {object} doc - The document being added to the index.
2387
+ * @returns {?(string|object|object[])} obj - The object that will be indexed for this field.
2388
+ * @example <caption>Extracting a nested field</caption>
2389
+ * function (doc) { return doc.nested.field }
2390
+ */
2391
+
2392
+ /**
2393
+ * Adds a field to the list of document fields that will be indexed. Every document being
2394
+ * indexed should have this field. Null values for this field in indexed documents will
2395
+ * not cause errors but will limit the chance of that document being retrieved by searches.
2396
+ *
2397
+ * All fields should be added before adding documents to the index. Adding fields after
2398
+ * a document has been indexed will have no effect on already indexed documents.
2399
+ *
2400
+ * Fields can be boosted at build time. This allows terms within that field to have more
2401
+ * importance when ranking search results. Use a field boost to specify that matches within
2402
+ * one field are more important than other fields.
2403
+ *
2404
+ * @param {string} fieldName - The name of a field to index in all documents.
2405
+ * @param {object} attributes - Optional attributes associated with this field.
2406
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this field.
2407
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document.
2408
+ * @throws {RangeError} fieldName cannot contain unsupported characters '/'
2409
+ */
2410
+ lunr.Builder.prototype.field = function (fieldName, attributes) {
2411
+ if (/\//.test(fieldName)) {
2412
+ throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'")
2413
+ }
2414
+
2415
+ this._fields[fieldName] = attributes || {}
2416
+ }
2417
+
2418
+ /**
2419
+ * A parameter to tune the amount of field length normalisation that is applied when
2420
+ * calculating relevance scores. A value of 0 will completely disable any normalisation
2421
+ * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
2422
+ * will be clamped to the range 0 - 1.
2423
+ *
2424
+ * @param {number} number - The value to set for this tuning parameter.
2425
+ */
2426
+ lunr.Builder.prototype.b = function (number) {
2427
+ if (number < 0) {
2428
+ this._b = 0
2429
+ } else if (number > 1) {
2430
+ this._b = 1
2431
+ } else {
2432
+ this._b = number
2433
+ }
2434
+ }
2435
+
2436
+ /**
2437
+ * A parameter that controls the speed at which a rise in term frequency results in term
2438
+ * frequency saturation. The default value is 1.2. Setting this to a higher value will give
2439
+ * slower saturation levels, a lower value will result in quicker saturation.
2440
+ *
2441
+ * @param {number} number - The value to set for this tuning parameter.
2442
+ */
2443
+ lunr.Builder.prototype.k1 = function (number) {
2444
+ this._k1 = number
2445
+ }
2446
+
2447
+ /**
2448
+ * Adds a document to the index.
2449
+ *
2450
+ * Before adding fields to the index the index should have been fully setup, with the document
2451
+ * ref and all fields to index already having been specified.
2452
+ *
2453
+ * The document must have a field name as specified by the ref (by default this is 'id') and
2454
+ * it should have all fields defined for indexing, though null or undefined values will not
2455
+ * cause errors.
2456
+ *
2457
+ * Entire documents can be boosted at build time. Applying a boost to a document indicates that
2458
+ * this document should rank higher in search results than other documents.
2459
+ *
2460
+ * @param {object} doc - The document to add to the index.
2461
+ * @param {object} attributes - Optional attributes associated with this document.
2462
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this document.
2463
+ */
2464
+ lunr.Builder.prototype.add = function (doc, attributes) {
2465
+ var docRef = doc[this._ref],
2466
+ fields = Object.keys(this._fields)
2467
+
2468
+ this._documents[docRef] = attributes || {}
2469
+ this.documentCount += 1
2470
+
2471
+ for (var i = 0; i < fields.length; i++) {
2472
+ var fieldName = fields[i],
2473
+ extractor = this._fields[fieldName].extractor,
2474
+ field = extractor ? extractor(doc) : doc[fieldName],
2475
+ tokens = this.tokenizer(field, {
2476
+ fields: [fieldName]
2477
+ }),
2478
+ terms = this.pipeline.run(tokens),
2479
+ fieldRef = new lunr.FieldRef (docRef, fieldName),
2480
+ fieldTerms = Object.create(null)
2481
+
2482
+ this.fieldTermFrequencies[fieldRef] = fieldTerms
2483
+ this.fieldLengths[fieldRef] = 0
2484
+
2485
+ // store the length of this field for this document
2486
+ this.fieldLengths[fieldRef] += terms.length
2487
+
2488
+ // calculate term frequencies for this field
2489
+ for (var j = 0; j < terms.length; j++) {
2490
+ var term = terms[j]
2491
+
2492
+ if (fieldTerms[term] == undefined) {
2493
+ fieldTerms[term] = 0
2494
+ }
2495
+
2496
+ fieldTerms[term] += 1
2497
+
2498
+ // add to inverted index
2499
+ // create an initial posting if one doesn't exist
2500
+ if (this.invertedIndex[term] == undefined) {
2501
+ var posting = Object.create(null)
2502
+ posting["_index"] = this.termIndex
2503
+ this.termIndex += 1
2504
+
2505
+ for (var k = 0; k < fields.length; k++) {
2506
+ posting[fields[k]] = Object.create(null)
2507
+ }
2508
+
2509
+ this.invertedIndex[term] = posting
2510
+ }
2511
+
2512
+ // add an entry for this term/fieldName/docRef to the invertedIndex
2513
+ if (this.invertedIndex[term][fieldName][docRef] == undefined) {
2514
+ this.invertedIndex[term][fieldName][docRef] = Object.create(null)
2515
+ }
2516
+
2517
+ // store all whitelisted metadata about this token in the
2518
+ // inverted index
2519
+ for (var l = 0; l < this.metadataWhitelist.length; l++) {
2520
+ var metadataKey = this.metadataWhitelist[l],
2521
+ metadata = term.metadata[metadataKey]
2522
+
2523
+ if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {
2524
+ this.invertedIndex[term][fieldName][docRef][metadataKey] = []
2525
+ }
2526
+
2527
+ this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)
2528
+ }
2529
+ }
2530
+
2531
+ }
2532
+ }
2533
+
2534
+ /**
2535
+ * Calculates the average document length for this index
2536
+ *
2537
+ * @private
2538
+ */
2539
+ lunr.Builder.prototype.calculateAverageFieldLengths = function () {
2540
+
2541
+ var fieldRefs = Object.keys(this.fieldLengths),
2542
+ numberOfFields = fieldRefs.length,
2543
+ accumulator = {},
2544
+ documentsWithField = {}
2545
+
2546
+ for (var i = 0; i < numberOfFields; i++) {
2547
+ var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
2548
+ field = fieldRef.fieldName
2549
+
2550
+ documentsWithField[field] || (documentsWithField[field] = 0)
2551
+ documentsWithField[field] += 1
2552
+
2553
+ accumulator[field] || (accumulator[field] = 0)
2554
+ accumulator[field] += this.fieldLengths[fieldRef]
2555
+ }
2556
+
2557
+ var fields = Object.keys(this._fields)
2558
+
2559
+ for (var i = 0; i < fields.length; i++) {
2560
+ var fieldName = fields[i]
2561
+ accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName]
2562
+ }
2563
+
2564
+ this.averageFieldLength = accumulator
2565
+ }
2566
+
2567
+ /**
2568
+ * Builds a vector space model of every document using lunr.Vector
2569
+ *
2570
+ * @private
2571
+ */
2572
+ lunr.Builder.prototype.createFieldVectors = function () {
2573
+ var fieldVectors = {},
2574
+ fieldRefs = Object.keys(this.fieldTermFrequencies),
2575
+ fieldRefsLength = fieldRefs.length,
2576
+ termIdfCache = Object.create(null)
2577
+
2578
+ for (var i = 0; i < fieldRefsLength; i++) {
2579
+ var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
2580
+ fieldName = fieldRef.fieldName,
2581
+ fieldLength = this.fieldLengths[fieldRef],
2582
+ fieldVector = new lunr.Vector,
2583
+ termFrequencies = this.fieldTermFrequencies[fieldRef],
2584
+ terms = Object.keys(termFrequencies),
2585
+ termsLength = terms.length
2586
+
2587
+
2588
+ var fieldBoost = this._fields[fieldName].boost || 1,
2589
+ docBoost = this._documents[fieldRef.docRef].boost || 1
2590
+
2591
+ for (var j = 0; j < termsLength; j++) {
2592
+ var term = terms[j],
2593
+ tf = termFrequencies[term],
2594
+ termIndex = this.invertedIndex[term]._index,
2595
+ idf, score, scoreWithPrecision
2596
+
2597
+ if (termIdfCache[term] === undefined) {
2598
+ idf = lunr.idf(this.invertedIndex[term], this.documentCount)
2599
+ termIdfCache[term] = idf
2600
+ } else {
2601
+ idf = termIdfCache[term]
2602
+ }
2603
+
2604
+ score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf)
2605
+ score *= fieldBoost
2606
+ score *= docBoost
2607
+ scoreWithPrecision = Math.round(score * 1000) / 1000
2608
+ // Converts 1.23456789 to 1.234.
2609
+ // Reducing the precision so that the vectors take up less
2610
+ // space when serialised. Doing it now so that they behave
2611
+ // the same before and after serialisation. Also, this is
2612
+ // the fastest approach to reducing a number's precision in
2613
+ // JavaScript.
2614
+
2615
+ fieldVector.insert(termIndex, scoreWithPrecision)
2616
+ }
2617
+
2618
+ fieldVectors[fieldRef] = fieldVector
2619
+ }
2620
+
2621
+ this.fieldVectors = fieldVectors
2622
+ }
2623
+
2624
+ /**
2625
+ * Creates a token set of all tokens in the index using lunr.TokenSet
2626
+ *
2627
+ * @private
2628
+ */
2629
+ lunr.Builder.prototype.createTokenSet = function () {
2630
+ this.tokenSet = lunr.TokenSet.fromArray(
2631
+ Object.keys(this.invertedIndex).sort()
2632
+ )
2633
+ }
2634
+
2635
+ /**
2636
+ * Builds the index, creating an instance of lunr.Index.
2637
+ *
2638
+ * This completes the indexing process and should only be called
2639
+ * once all documents have been added to the index.
2640
+ *
2641
+ * @returns {lunr.Index}
2642
+ */
2643
+ lunr.Builder.prototype.build = function () {
2644
+ this.calculateAverageFieldLengths()
2645
+ this.createFieldVectors()
2646
+ this.createTokenSet()
2647
+
2648
+ return new lunr.Index({
2649
+ invertedIndex: this.invertedIndex,
2650
+ fieldVectors: this.fieldVectors,
2651
+ tokenSet: this.tokenSet,
2652
+ fields: Object.keys(this._fields),
2653
+ pipeline: this.searchPipeline
2654
+ })
2655
+ }
2656
+
2657
+ /**
2658
+ * Applies a plugin to the index builder.
2659
+ *
2660
+ * A plugin is a function that is called with the index builder as its context.
2661
+ * Plugins can be used to customise or extend the behaviour of the index
2662
+ * in some way. A plugin is just a function, that encapsulated the custom
2663
+ * behaviour that should be applied when building the index.
2664
+ *
2665
+ * The plugin function will be called with the index builder as its argument, additional
2666
+ * arguments can also be passed when calling use. The function will be called
2667
+ * with the index builder as its context.
2668
+ *
2669
+ * @param {Function} plugin The plugin to apply.
2670
+ */
2671
+ lunr.Builder.prototype.use = function (fn) {
2672
+ var args = Array.prototype.slice.call(arguments, 1)
2673
+ args.unshift(this)
2674
+ fn.apply(this, args)
2675
+ }
2676
/**
 * Contains and collects metadata about a matching document.
 * A single instance of lunr.MatchData is returned as part of every
 * lunr.Index~Result.
 *
 * @constructor
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 * @property {object} metadata - A cloned collection of metadata associated with this document.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData = function (term, field, metadata) {
  // Clone the incoming metadata so that later combination of match
  // data cannot mutate the arrays held in the inverted index. Each
  // value is an array, so Array#slice is a sufficient shallow copy.
  var clonedMetadata = Object.create(null)

  Object.keys(metadata || {}).forEach(function (key) {
    clonedMetadata[key] = metadata[key].slice()
  })

  this.metadata = Object.create(null)

  if (term !== undefined) {
    this.metadata[term] = Object.create(null)
    this.metadata[term][field] = clonedMetadata
  }
}
2709
+
2710
/**
 * An instance of lunr.MatchData will be created for every term that matches a
 * document. However only one instance is required in a lunr.Index~Result. This
 * method combines metadata from another instance of lunr.MatchData with this
 * objects metadata.
 *
 * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData.prototype.combine = function (otherMatchData) {
  var terms = Object.keys(otherMatchData.metadata)

  for (var i = 0; i < terms.length; i++) {
    var term = terms[i],
        fields = Object.keys(otherMatchData.metadata[term])

    // Ensure a container exists for this term before merging its fields.
    if (this.metadata[term] == undefined) {
      this.metadata[term] = Object.create(null)
    }

    for (var j = 0; j < fields.length; j++) {
      var field = fields[j],
          keys = Object.keys(otherMatchData.metadata[term][field])

      if (this.metadata[term][field] == undefined) {
        this.metadata[term][field] = Object.create(null)
      }

      // Metadata values are arrays (see the lunr.MatchData constructor),
      // so merging two entries for the same key is a concatenation; a
      // missing key just adopts the other instance's array.
      for (var k = 0; k < keys.length; k++) {
        var key = keys[k]

        if (this.metadata[term][field][key] == undefined) {
          this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]
        } else {
          this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])
        }

      }
    }
  }
}
2751
+
2752
/**
 * Add metadata for a term/field pair to this instance of match data.
 *
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 */
lunr.MatchData.prototype.add = function (term, field, metadata) {
  // Unknown term: adopt the metadata object wholesale.
  if (!(term in this.metadata)) {
    this.metadata[term] = Object.create(null)
    this.metadata[term][field] = metadata
    return
  }

  // Known term but unknown field: adopt the metadata object wholesale.
  if (!(field in this.metadata[term])) {
    this.metadata[term][field] = metadata
    return
  }

  // Both term and field are present: merge key by key, concatenating
  // the arrays for keys that already exist.
  var existing = this.metadata[term][field]

  Object.keys(metadata).forEach(function (key) {
    if (key in existing) {
      existing[key] = existing[key].concat(metadata[key])
    } else {
      existing[key] = metadata[key]
    }
  })
}
2783
/**
 * A lunr.Query provides a programmatic way of defining queries to be performed
 * against a {@link lunr.Index}.
 *
 * Prefer constructing a lunr.Query via the {@link lunr.Index#query} method
 * so the query object is pre-initialised with the right index fields.
 *
 * @constructor
 * @param {string[]} allFields - An array of all available fields in a lunr.Index.
 * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
 * @property {string[]} allFields - An array of all available fields in a lunr.Index.
 */
lunr.Query = function (allFields) {
  this.allFields = allFields
  this.clauses = []
}
2798
+
2799
/**
 * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
 *
 * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
 * concatenation.
 *
 * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
 *
 * @constant
 * @default
 * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
 * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
 * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
 * @see lunr.Query~Clause
 * @see lunr.Query#clause
 * @see lunr.Query#term
 * @example <caption>query term with trailing wildcard</caption>
 * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
 * @example <caption>query term with leading and trailing wildcard</caption>
 * query.term('foo', {
 *   wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
 * })
 */

// NOTE: deliberately a String *object* (not a primitive) so it can both
// be loosely compared against term characters in lunr.Query#clause and
// carry the NONE/LEADING/TRAILING flag properties below. Do not replace
// with a string literal.
lunr.Query.wildcard = new String ("*")
lunr.Query.wildcard.NONE = 0
lunr.Query.wildcard.LEADING = 1
lunr.Query.wildcard.TRAILING = 2
2827
+
2828
/**
 * Constants for indicating what kind of presence a term must have in matching documents.
 *
 * Unlike lunr.Query.wildcard these values are plain enum members, not
 * bit flags — they must not be combined.
 *
 * @constant
 * @enum {number}
 * @see lunr.Query~Clause
 * @see lunr.Query#clause
 * @see lunr.Query#term
 * @example <caption>query term with required presence</caption>
 * query.term('foo', { presence: lunr.Query.presence.REQUIRED })
 */
lunr.Query.presence = {
  /**
   * Term's presence in a document is optional, this is the default value.
   */
  OPTIONAL: 1,

  /**
   * Term's presence in a document is required, documents that do not contain
   * this term will not be returned.
   */
  REQUIRED: 2,

  /**
   * Term's presence in a document is prohibited, documents that do contain
   * this term will not be returned.
   */
  PROHIBITED: 3
}
2857
+
2858
+ /**
2859
+ * A single clause in a {@link lunr.Query} contains a term and details on how to
2860
+ * match that term against a {@link lunr.Index}.
2861
+ *
2862
+ * @typedef {Object} lunr.Query~Clause
2863
+ * @property {string[]} fields - The fields in an index this clause should be matched against.
2864
+ * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
2865
+ * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
2866
+ * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
2867
+ * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.
2868
+ * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents.
2869
+ */
2870
+
2871
/**
 * Adds a {@link lunr.Query~Clause} to this query.
 *
 * Missing clause properties are given defaults: all index fields are
 * matched unless `fields` is set, `boost` defaults to 1, terms pass
 * through the search pipeline unless `usePipeline` is false, no
 * wildcards are inserted and presence defaults to optional.
 *
 * @param {lunr.Query~Clause} clause - The clause to add to this query.
 * @see lunr.Query~Clause
 * @returns {lunr.Query}
 */
lunr.Query.prototype.clause = function (clause) {
  if (!('fields' in clause)) clause.fields = this.allFields
  if (!('boost' in clause)) clause.boost = 1
  if (!('usePipeline' in clause)) clause.usePipeline = true
  if (!('wildcard' in clause)) clause.wildcard = lunr.Query.wildcard.NONE
  if (!('presence' in clause)) clause.presence = lunr.Query.presence.OPTIONAL

  // lunr.Query.wildcard is a String object wrapping "*"; the loose
  // comparison against a single character is intentional.
  var needsLeading = (clause.wildcard & lunr.Query.wildcard.LEADING) &&
    (clause.term.charAt(0) != lunr.Query.wildcard)
  var needsTrailing = (clause.wildcard & lunr.Query.wildcard.TRAILING) &&
    (clause.term.slice(-1) != lunr.Query.wildcard)

  if (needsLeading) {
    clause.term = "*" + clause.term
  }

  if (needsTrailing) {
    clause.term = clause.term + "*"
  }

  this.clauses.push(clause)

  return this
}
2914
+
2915
/**
 * A negated query is one in which every clause has a presence of
 * prohibited. These queries require some special processing to return
 * the expected results.
 *
 * @returns {boolean} True when every clause is prohibited (vacuously
 *   true for a query with no clauses, matching the original loop).
 */
lunr.Query.prototype.isNegated = function () {
  return this.clauses.every(function (clause) {
    return clause.presence == lunr.Query.presence.PROHIBITED
  })
}
2931
+
2932
/**
 * Adds a term to the current query; under the covers this creates a
 * {@link lunr.Query~Clause} and appends it to the query's clauses.
 *
 * No tokenisation is performed: the term is used as-is after being
 * converted to a string with `toString`. An array of terms may be
 * passed; each entry becomes its own clause sharing a clone of the
 * same options.
 *
 * @param {object|object[]} term - The term(s) to add to the query.
 * @param {object} [options] - Any additional properties to add to the query clause.
 * @returns {lunr.Query}
 * @see lunr.Query#clause
 * @see lunr.Query~Clause
 * @example <caption>adding a single term to a query</caption>
 * query.term("foo")
 * @example <caption>adding a term with fields, boost and a trailing wildcard</caption>
 * query.term("foo", {
 *   fields: ["title"],
 *   boost: 10,
 *   wildcard: lunr.Query.wildcard.TRAILING
 * })
 * @example <caption>using lunr.tokenizer to convert a string to tokens before using them as terms</caption>
 * query.term(lunr.tokenizer("foo bar"))
 */
lunr.Query.prototype.term = function (term, options) {
  if (Array.isArray(term)) {
    var self = this
    term.forEach(function (t) { self.term(t, lunr.utils.clone(options)) })
    return this
  }

  var clause = options || {}
  clause.term = term.toString()

  this.clause(clause)

  return this
}
2971
/**
 * Error thrown when a search query string cannot be parsed.
 *
 * @constructor
 * @param {string} message - Human readable description of the parse failure.
 * @param {number} start - Offset into the query string where the problem begins.
 * @param {number} end - Offset into the query string where the problem ends.
 */
lunr.QueryParseError = function (message, start, end) {
  this.name = "QueryParseError"
  this.message = message
  this.start = start
  this.end = end
}

// Inherit from Error without instantiating one: `new Error` would bake a
// shared Error instance (including a stray stack trace captured at load
// time) into the prototype. `e instanceof Error` still holds.
lunr.QueryParseError.prototype = Object.create(Error.prototype)
lunr.QueryParseError.prototype.constructor = lunr.QueryParseError
2979
/**
 * Lexer that splits a raw query string into a stream of lexemes.
 *
 * @constructor
 * @param {string} str - The raw query string to tokenise.
 */
lunr.QueryLexer = function (str) {
  this.str = str
  this.length = str.length
  this.lexemes = []             // completed lexemes, in discovery order
  this.pos = 0                  // index of the next character to read
  this.start = 0                // start index of the lexeme being built
  this.escapeCharPositions = [] // indices of '\' chars, stripped later
}
2987
+
2988
/**
 * Drives the lexer state machine: each state function consumes input
 * and returns the next state function, or a falsy value when the input
 * is exhausted.
 */
lunr.QueryLexer.prototype.run = function () {
  for (var state = lunr.QueryLexer.lexText; state;) {
    state = state(this)
  }
}
2995
+
2996
/**
 * Returns the text of the lexeme currently being built (from start up
 * to, but not including, pos) with any recorded escape characters
 * removed, and clears the escape positions for the next lexeme.
 *
 * @returns {string}
 */
lunr.QueryLexer.prototype.sliceString = function () {
  var subSlices = [],
      sliceStart = this.start,
      sliceEnd = this.pos

  // Copy the text on either side of each escape character, skipping
  // the '\' itself (positions were recorded by escapeCharacter).
  for (var i = 0; i < this.escapeCharPositions.length; i++) {
    sliceEnd = this.escapeCharPositions[i]
    subSlices.push(this.str.slice(sliceStart, sliceEnd))
    sliceStart = sliceEnd + 1
  }

  subSlices.push(this.str.slice(sliceStart, this.pos))
  this.escapeCharPositions.length = 0

  return subSlices.join('')
}
3012
+
3013
/**
 * Records a completed lexeme of the given type, spanning the input from
 * the current lexeme start to the read position, then resets the start
 * marker ready for the next lexeme.
 *
 * @param {string} type - One of the lunr.QueryLexer lexeme type constants.
 */
lunr.QueryLexer.prototype.emit = function (type) {
  var lexeme = {
    type: type,
    str: this.sliceString(),
    start: this.start,
    end: this.pos
  }

  this.lexemes.push(lexeme)
  this.start = this.pos
}
3023
+
3024
/**
 * Records the position of an escape character ('\', already consumed,
 * hence pos - 1) so sliceString can strip it later, and skips over the
 * character being escaped.
 */
lunr.QueryLexer.prototype.escapeCharacter = function () {
  this.escapeCharPositions.push(this.pos - 1)
  this.pos += 1
}
3028
+
3029
/**
 * Consumes and returns the next character of the input, or the EOS
 * sentinel when the input is exhausted.
 *
 * @returns {string} The next character, or lunr.QueryLexer.EOS.
 */
lunr.QueryLexer.prototype.next = function () {
  if (this.pos >= this.length) {
    return lunr.QueryLexer.EOS
  }

  // Post-increment: read the current character, then advance past it.
  return this.str.charAt(this.pos++)
}
3038
+
3039
/**
 * Number of characters consumed so far for the lexeme being built.
 *
 * @returns {number}
 */
lunr.QueryLexer.prototype.width = function () {
  return this.pos - this.start
}
3042
+
3043
/**
 * Discards the lexeme currently being built. When nothing has been
 * consumed yet the next character is skipped instead, so the lexer
 * always makes forward progress.
 */
lunr.QueryLexer.prototype.ignore = function () {
  if (this.start == this.pos) {
    this.pos += 1
  }

  this.start = this.pos
}
3050
+
3051
/**
 * Steps the read position back one character, undoing the last next().
 */
lunr.QueryLexer.prototype.backup = function () {
  this.pos -= 1
}
3054
+
3055
/**
 * Consumes a run of ASCII digits (char codes 48-57 exclusive of the
 * bounds tested). Stops at the first non-digit and backs up so that
 * character can be read again — unless the input ended: the EOS
 * sentinel's first char code ('E', 69) also fails the digit test,
 * exiting the loop without a backup.
 */
lunr.QueryLexer.prototype.acceptDigitRun = function () {
  var char, charCode

  do {
    char = this.next()
    charCode = char.charCodeAt(0)
  } while (charCode > 47 && charCode < 58)

  if (char != lunr.QueryLexer.EOS) {
    this.backup()
  }
}
3067
+
3068
/**
 * Whether any unread input remains.
 *
 * @returns {boolean}
 */
lunr.QueryLexer.prototype.more = function () {
  return this.pos < this.length
}
3071
+
3072
// Lexeme type constants emitted by the lexer state functions below.
lunr.QueryLexer.EOS = 'EOS'                     // end of the query string
lunr.QueryLexer.FIELD = 'FIELD'                 // a "field:" prefix
lunr.QueryLexer.TERM = 'TERM'                   // a search term
lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' // a "~n" fuzziness suffix
lunr.QueryLexer.BOOST = 'BOOST'                 // a "^n" boost suffix
lunr.QueryLexer.PRESENCE = 'PRESENCE'           // a leading "+" or "-"
3078
+
3079
/**
 * State: a ':' was just consumed. Emits everything before the colon as
 * a FIELD lexeme, drops the colon itself, and resumes general lexing.
 */
lunr.QueryLexer.lexField = function (lexer) {
  lexer.backup()
  lexer.emit(lunr.QueryLexer.FIELD)
  lexer.ignore()
  return lunr.QueryLexer.lexText
}
3085
+
3086
/**
 * State: a term separator was just consumed. Emits the characters
 * before it (if any) as a TERM lexeme, skips the separator, and
 * resumes lexing when input remains; otherwise returns undefined,
 * which ends the run loop.
 */
lunr.QueryLexer.lexTerm = function (lexer) {
  // width > 1 means at least one character precedes the separator.
  if (lexer.width() > 1) {
    lexer.backup()
    lexer.emit(lunr.QueryLexer.TERM)
  }

  lexer.ignore()

  if (lexer.more()) {
    return lunr.QueryLexer.lexText
  }
}
3098
+
3099
/**
 * State: a '~' was just consumed. Drops the '~' and emits the digit
 * run that follows as an EDIT_DISTANCE lexeme.
 */
lunr.QueryLexer.lexEditDistance = function (lexer) {
  lexer.ignore()
  lexer.acceptDigitRun()
  lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
  return lunr.QueryLexer.lexText
}
3105
+
3106
/**
 * State: a '^' was just consumed. Drops the '^' and emits the digit
 * run that follows as a BOOST lexeme.
 */
lunr.QueryLexer.lexBoost = function (lexer) {
  lexer.ignore()
  lexer.acceptDigitRun()
  lexer.emit(lunr.QueryLexer.BOOST)
  return lunr.QueryLexer.lexText
}
3112
+
3113
/**
 * State: end of input. Flushes any pending characters as a final TERM
 * lexeme; returns undefined, which stops the run loop.
 */
lunr.QueryLexer.lexEOS = function (lexer) {
  if (lexer.width() > 0) {
    lexer.emit(lunr.QueryLexer.TERM)
  }
}
3118
+
3119
// This matches the separator used when tokenising fields
// within a document. These should match otherwise it is
// not possible to search for some tokens within a document.
//
// It is possible for the user to change the separator on the
// tokenizer so it _might_ clash with any other of the special
// characters already used within the search string, e.g. :.
//
// This means that it is possible to change the separator in
// such a way that makes some words unsearchable using a search
// string.
//
// Note: this is a RegExp (used via String#match in lexText), captured
// once at load time — later reassignment of lunr.tokenizer.separator
// is not picked up here.
lunr.QueryLexer.termSeparator = lunr.tokenizer.separator
3131
+
3132
/**
 * The central lexer state: reads characters one at a time and
 * dispatches to the specialised states (field, edit distance, boost,
 * presence, term) on the corresponding special character, handling
 * backslash escapes inline.
 */
lunr.QueryLexer.lexText = function (lexer) {
  while (true) {
    var char = lexer.next()

    if (char == lunr.QueryLexer.EOS) {
      return lunr.QueryLexer.lexEOS
    }

    // Escape character is '\'
    if (char.charCodeAt(0) == 92) {
      lexer.escapeCharacter()
      continue
    }

    if (char == ":") {
      return lunr.QueryLexer.lexField
    }

    if (char == "~") {
      // Flush any preceding characters as a TERM before handing the
      // '~' suffix to the edit-distance state.
      lexer.backup()
      if (lexer.width() > 0) {
        lexer.emit(lunr.QueryLexer.TERM)
      }
      return lunr.QueryLexer.lexEditDistance
    }

    if (char == "^") {
      // Flush any preceding characters as a TERM before handing the
      // '^' suffix to the boost state.
      lexer.backup()
      if (lexer.width() > 0) {
        lexer.emit(lunr.QueryLexer.TERM)
      }
      return lunr.QueryLexer.lexBoost
    }

    // "+" indicates term presence is required
    // checking for length to ensure that only
    // leading "+" are considered
    if (char == "+" && lexer.width() === 1) {
      lexer.emit(lunr.QueryLexer.PRESENCE)
      return lunr.QueryLexer.lexText
    }

    // "-" indicates term presence is prohibited
    // checking for length to ensure that only
    // leading "-" are considered
    if (char == "-" && lexer.width() === 1) {
      lexer.emit(lunr.QueryLexer.PRESENCE)
      return lunr.QueryLexer.lexText
    }

    if (char.match(lunr.QueryLexer.termSeparator)) {
      return lunr.QueryLexer.lexTerm
    }
  }
}
3187
+
3188
/**
 * Parses a raw query string into clauses on a lunr.Query.
 *
 * @constructor
 * @param {string} str - The raw query string.
 * @param {lunr.Query} query - The query to populate with parsed clauses.
 */
lunr.QueryParser = function (str, query) {
  this.query = query
  this.currentClause = {}  // the clause under construction
  this.lexemeIdx = 0       // read cursor into this.lexemes
  this.lexer = new lunr.QueryLexer (str)
}
3194
+
3195
/**
 * Runs the lexer and then the parser state machine, adding a clause to
 * the query for every term found in the query string.
 *
 * @returns {lunr.Query} The populated query.
 * @throws {lunr.QueryParseError} When the query string is malformed.
 */
lunr.QueryParser.prototype.parse = function () {
  this.lexer.run()
  this.lexemes = this.lexer.lexemes

  // Each state function returns the next state; a falsy return value
  // ends the parse.
  for (var state = lunr.QueryParser.parseClause; state;) {
    state = state(this)
  }

  return this.query
}
3207
+
3208
/**
 * Returns the lexeme under the cursor without consuming it.
 *
 * @returns {object|undefined} The next lexeme, or undefined at the end.
 */
lunr.QueryParser.prototype.peekLexeme = function () {
  return this.lexemes[this.lexemeIdx]
}
3211
+
3212
/**
 * Returns the lexeme under the cursor and advances the cursor past it.
 *
 * @returns {object|undefined} The consumed lexeme, or undefined at the end.
 */
lunr.QueryParser.prototype.consumeLexeme = function () {
  var consumed = this.peekLexeme()
  this.lexemeIdx += 1
  return consumed
}
3217
+
3218
/**
 * Finalises the clause currently being built: adds it to the query
 * (which fills in defaults, see lunr.Query#clause) and starts a fresh
 * empty clause.
 */
lunr.QueryParser.prototype.nextClause = function () {
  var completedClause = this.currentClause
  this.query.clause(completedClause)
  this.currentClause = {}
}
3223
+
3224
/**
 * Parser state: start of a clause. A clause may begin with a presence
 * operator, a field or a term; dispatches to the matching state.
 * Returns undefined (ending the parse) when no lexemes remain.
 *
 * @throws {lunr.QueryParseError} On any other lexeme type.
 */
lunr.QueryParser.parseClause = function (parser) {
  var lexeme = parser.peekLexeme()

  if (lexeme == undefined) {
    return
  }

  switch (lexeme.type) {
    case lunr.QueryLexer.PRESENCE:
      return lunr.QueryParser.parsePresence
    case lunr.QueryLexer.FIELD:
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.TERM:
      return lunr.QueryParser.parseTerm
    default:
      var errorMessage = "expected either a field or a term, found " + lexeme.type

      if (lexeme.str.length >= 1) {
        errorMessage += " with value '" + lexeme.str + "'"
      }

      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }
}
3248
+
3249
/**
 * Parser state: consumes a PRESENCE lexeme ("+" or "-"), records the
 * corresponding presence on the clause being built, and dispatches to
 * the field or term state that must follow.
 *
 * @throws {lunr.QueryParseError} On an unknown presence operator, or
 *   when no field/term follows it.
 */
lunr.QueryParser.parsePresence = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  switch (lexeme.str) {
    case "-":
      parser.currentClause.presence = lunr.Query.presence.PROHIBITED
      break
    case "+":
      parser.currentClause.presence = lunr.Query.presence.REQUIRED
      break
    default:
      // Fix: the original message was missing the space before the
      // quoted operator ("operator'-'" -> "operator '-'").
      var errorMessage = "unrecognised presence operator '" + lexeme.str + "'"
      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    var errorMessage = "expecting term or field, found nothing"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.FIELD:
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.TERM:
      return lunr.QueryParser.parseTerm
    default:
      var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}
3285
+
3286
/**
 * Parser state: consumes a FIELD lexeme, validates it against the
 * query's known fields and records it on the clause being built. A
 * TERM lexeme must follow.
 *
 * @throws {lunr.QueryParseError} On an unknown field, or when no term follows.
 */
lunr.QueryParser.parseField = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  if (parser.query.allFields.indexOf(lexeme.str) == -1) {
    // List the valid fields in the error to help the caller.
    var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
        errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields

    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.fields = [lexeme.str]

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    var errorMessage = "expecting term, found nothing"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      return lunr.QueryParser.parseTerm
    default:
      var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}
3317
+
3318
/**
 * Parser state: consumes a TERM lexeme. The term is lower-cased, and a
 * term containing a '*' wildcard has usePipeline disabled on its
 * clause. A following TERM/FIELD/PRESENCE lexeme closes the current
 * clause; EDIT_DISTANCE and BOOST keep modifying it.
 *
 * @throws {lunr.QueryParseError} On an unexpected following lexeme.
 */
lunr.QueryParser.parseTerm = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  parser.currentClause.term = lexeme.str.toLowerCase()

  if (lexeme.str.indexOf("*") != -1) {
    parser.currentClause.usePipeline = false
  }

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}
3357
+
3358
/**
 * Parser state: consumes an EDIT_DISTANCE lexeme ("~n") and records the
 * numeric fuzziness on the clause being built, then dispatches exactly
 * like parseTerm on the following lexeme.
 *
 * @throws {lunr.QueryParseError} When the value is not numeric, or an
 *   unexpected lexeme follows.
 */
lunr.QueryParser.parseEditDistance = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  var editDistance = parseInt(lexeme.str, 10)

  if (isNaN(editDistance)) {
    var errorMessage = "edit distance must be numeric"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.editDistance = editDistance

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}
3400
+
3401
/**
 * Parser state: consumes a BOOST lexeme ("^n") and records the numeric
 * boost on the clause being built, then dispatches exactly like
 * parseTerm on the following lexeme.
 *
 * @throws {lunr.QueryParseError} When the value is not numeric, or an
 *   unexpected lexeme follows.
 */
lunr.QueryParser.parseBoost = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  var boost = parseInt(lexeme.str, 10)

  if (isNaN(boost)) {
    var errorMessage = "boost must be numeric"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.boost = boost

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}
3443
+
3444
/**
 * export the module via AMD, CommonJS or as a browser global
 * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
 */
;(function (root, factory) {
  if (typeof define === 'function' && define.amd) {
    // AMD. Register as an anonymous module.
    define(factory)
  } else if (typeof exports === 'object') {
    /**
     * Node. Does not work with strict CommonJS, but
     * only CommonJS-like environments that support module.exports,
     * like Node.
     */
    module.exports = factory()
  } else {
    // Browser globals (root is window)
    root.lunr = factory()
  }
}(this, function () {
  /**
   * The factory simply returns the already-constructed lunr namespace;
   * no per-call state is created here.
   */
  return lunr
}))
3471
+ })();