betterplace-content 0.1.16 → 0.1.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: c0d060fb7d99cea137dbd4ac45cbe1bb06da1748
4
- data.tar.gz: ad6e489c8f3aba4bd251069442103bc55ea8f8b3
3
+ metadata.gz: 4467072cf9affc5f5a2b62ad387557091d3af03b
4
+ data.tar.gz: 9159665a47e8db2e1eb1fa7b98a07e696c2c79b5
5
5
  SHA512:
6
- metadata.gz: 1e33bc0a9f8beb3ff054bfada3f21d4025dc3efbbda885bc7a5881970cddae9edad8f12ded242ab69a6cac8a11ca703fb3955a0de12786a5ab1c6edb2697ab58
7
- data.tar.gz: 0c8a7cd5626c8f3e941e9040f08fe04b4adfce8f942ffe10ad2690bd5d79dc0232aafae37c1b78470ed865c2f5648ecb761ad6cbcf44979ec60addc952b26ca1
6
+ metadata.gz: d63525504b6034b68cf2d26db4b0a943872a55564e3661457a455418db4c0763ed4086a63802660785845b335c498dd61dad6ac0d5d57e47439364619d8c3126
7
+ data.tar.gz: 8f6778fc98bb437dedf7dfa981f2189ea162729d80b4b0cf1119acc9866b3d67f5ed0c573b77f8003a4efc0d9f49da6ebb245ef7f4b17dc8aa8a6265ce972b6a
@@ -38,14 +38,14 @@ if(window.getDevicePixelRatio!==undefined) {
38
38
  ga('set', 'dimension2', window.getDevicePixelRatio()); // DevicePixelRatio (Session)
39
39
  }
40
40
 
41
- ga('set', 'dimension4', 'de'); // Sprache der URL (Hit)
42
- ga('set', 'dimension5', '{{ page.url }}');
41
+ ga('set', 'dimension4', '{{ site.lang }}'); // Sprache der URL (Hit)
42
+ ga('set', 'dimension5', '{{ site.baseurl }}{{ page.url }}');
43
43
 
44
44
  ga('require', 'linkid');
45
45
  ga('require', 'displayfeatures');
46
46
  ga('require', 'maxScrollTracker');
47
47
  ga('require', 'outboundLinkTracker');
48
- ga('require', 'GTM-WK6QJ9P');
48
+ // ga('require', 'GTM-WK6QJ9P');
49
49
 
50
50
  ga('send', 'pageview');
51
51
  </script>
@@ -0,0 +1,61 @@
1
+ ---
2
+ layout: default
3
+ ---
4
+
5
+ <div class="generic-content-header">
6
+ <div class="container">
7
+ <div class="row">
8
+ <div class="col-md-18">
9
+ <header>
10
+ <h1>
11
+ <a href="{{ site.baseurl }}">Hilfe</a>
12
+ <b><img src="//www.betterplace.org/c/wp-content/themes/betterplace.org//images/breadcrumb-arrow.png" alt=">" heigth="13" width="8"></b>
13
+ {{ page.title }}
14
+ </h1>
15
+
16
+ <div class="generic-search-form">
17
+ <form action="{{ site.baseurl }}" method="get">
18
+ <div class="input-group">
19
+ <input type="text" id="search-box" name="s" class="form-control" value="">
20
+ <input name="ga_category" value="content-pages--help" type="hidden">
21
+ <span class="input-group-btn hidden-xs">
22
+ <input value="Hilfe durchsuchen" class="btn btn-default" type="submit">
23
+ </span>
24
+ </div>
25
+ </form>
26
+ </div>
27
+ </header>
28
+ </div>
29
+ </div>
30
+ </div>
31
+ </div>
32
+
33
+ <div class="content-wrapper">
34
+ <div class="container centered">
35
+ <div class="row">
36
+ <div class="col-md-18">
37
+ <article>
38
+ {{ content }}
39
+ </article>
40
+
41
+ <hr/>
42
+
43
+ {% if page.categories.size > 0 %}
44
+ <div class="themenline">
45
+ <strong>Themen:</strong>
46
+ {% for category in page.categories %}
47
+ <a href="{{ site.baseurl }}/{{ category | slugify }}" rel="category tag">{{ category }}</a>{% unless forloop.last %},{% endunless %}
48
+ {% endfor %}
49
+ </div>
50
+ {% endif %}
51
+
52
+ <div class="descText">
53
+ Geschrieben von {{ page.author.display_name }} am {{ page.date | date:"%d.%m.%Y" }}. Zuletzt aktualisiert am {{ page.modified | date:"%d.%m.%Y" }}.
54
+ </div>
55
+ </div>
56
+ </div>
57
+ </div>
58
+ </div>
59
+
60
+ <script src="{{ site.baseurl }}/assets/js/lunr.js"></script>
61
+ <script src="{{ site.baseurl }}/assets/js/help_search.js"></script>
data/_sass/about-us.sass CHANGED
@@ -14,4 +14,3 @@ body.about-us
14
14
  @import "about-us/history"
15
15
  @import "about-us/transparency"
16
16
  @import "about-us/social-sharing"
17
- @import "about-us/freundeskreis"
@@ -6,7 +6,7 @@
6
6
 
7
7
  +phone
8
8
  background: url('#{$site_baseurl}/assets/theme_images/about-us/because-icon-phone.png') no-repeat white 50% 125px
9
- background-size: 548px/2 492px/2
9
+ background-size: (548px/2) (492px/2)
10
10
 
11
11
  .container
12
12
  +desktop
@@ -58,7 +58,7 @@
58
58
  content: ''
59
59
  display: block
60
60
  background: url('#{$site_baseurl}/assets/theme_images/about-us/because-icon-phone.png') no-repeat 0 0
61
- background-size: 548px / 2 * 1.3 492px / 2 * 1.3
61
+ background-size: (548px / 2 * 1.3) (492px / 2 * 1.3)
62
62
  margin: 0 auto
63
63
 
64
64
  &.because-betterplace-org:before
@@ -9,6 +9,7 @@
9
9
  // @import initialize/mixins
10
10
  // @import initialize/media_queries
11
11
  // @import initialize/buttons
12
+ // @import initialize/utilities_spacing
12
13
  // @import layouts/header
13
14
  // @import layouts/footer
14
15
  // @import initialize/type
@@ -555,6 +556,195 @@ html.touch .btn.btn-primary.mobile-friendly-hover:hover, html.touch a.btn.btn-pr
555
556
  }
556
557
  }
557
558
 
559
+ .m-a-0 {
560
+ margin: 0 !important;
561
+ }
562
+ .m-t-0 {
563
+ margin-top: 0 !important;
564
+ }
565
+ .m-r-0 {
566
+ margin-right: 0 !important;
567
+ }
568
+ .m-b-0 {
569
+ margin-bottom: 0 !important;
570
+ }
571
+ .m-l-0 {
572
+ margin-left: 0 !important;
573
+ }
574
+ .m-x-0 {
575
+ margin-right: 0 !important;
576
+ margin-left: 0 !important;
577
+ }
578
+ .m-y-0 {
579
+ margin-top: 0 !important;
580
+ margin-bottom: 0 !important;
581
+ }
582
+ .m-a {
583
+ margin: 20px !important;
584
+ }
585
+ .m-t {
586
+ margin-top: 20px !important;
587
+ }
588
+ .m-r {
589
+ margin-right: 20px !important;
590
+ }
591
+ .m-b {
592
+ margin-bottom: 20px !important;
593
+ }
594
+ .m-l {
595
+ margin-left: 20px !important;
596
+ }
597
+ .m-x {
598
+ margin-right: 20px !important;
599
+ margin-left: 20px !important;
600
+ }
601
+ .m-y {
602
+ margin-top: 20px !important;
603
+ margin-bottom: 20px !important;
604
+ }
605
+ .m-x-auto {
606
+ margin-right: auto !important;
607
+ margin-left: auto !important;
608
+ }
609
+ .m-a-md {
610
+ margin: 30px !important;
611
+ }
612
+ .m-t-md {
613
+ margin-top: 30px !important;
614
+ }
615
+ .m-r-md {
616
+ margin-right: 30px !important;
617
+ }
618
+ .m-b-md {
619
+ margin-bottom: 30px !important;
620
+ }
621
+ .m-l-md {
622
+ margin-left: 30px !important;
623
+ }
624
+ .m-x-md {
625
+ margin-right: 30px !important;
626
+ margin-left: 30px !important;
627
+ }
628
+ .m-y-md {
629
+ margin-top: 30px !important;
630
+ margin-bottom: 30px !important;
631
+ }
632
+ .m-a-lg {
633
+ margin: 60px !important;
634
+ }
635
+ .m-t-lg {
636
+ margin-top: 60px !important;
637
+ }
638
+ .m-r-lg {
639
+ margin-right: 60px !important;
640
+ }
641
+ .m-b-lg {
642
+ margin-bottom: 60px !important;
643
+ }
644
+ .m-l-lg {
645
+ margin-left: 60px !important;
646
+ }
647
+ .m-x-lg {
648
+ margin-right: 60px !important;
649
+ margin-left: 60px !important;
650
+ }
651
+ .m-y-lg {
652
+ margin-top: 60px !important;
653
+ margin-bottom: 60px !important;
654
+ }
655
+ .p-a-0 {
656
+ padding: 0 !important;
657
+ }
658
+ .p-t-0 {
659
+ padding-top: 0 !important;
660
+ }
661
+ .p-r-0 {
662
+ padding-right: 0 !important;
663
+ }
664
+ .p-b-0 {
665
+ padding-bottom: 0 !important;
666
+ }
667
+ .p-l-0 {
668
+ padding-left: 0 !important;
669
+ }
670
+ .p-x-0 {
671
+ padding-right: 0 !important;
672
+ padding-left: 0 !important;
673
+ }
674
+ .p-y-0 {
675
+ padding-top: 0 !important;
676
+ padding-bottom: 0 !important;
677
+ }
678
+ .p-a {
679
+ padding: 20px !important;
680
+ }
681
+ .p-t {
682
+ padding-top: 20px !important;
683
+ }
684
+ .p-r {
685
+ padding-right: 20px !important;
686
+ }
687
+ .p-b {
688
+ padding-bottom: 20px !important;
689
+ }
690
+ .p-l {
691
+ padding-left: 20px !important;
692
+ }
693
+ .p-x {
694
+ padding-right: 20px !important;
695
+ padding-left: 20px !important;
696
+ }
697
+ .p-y {
698
+ padding-top: 20px !important;
699
+ padding-bottom: 20px !important;
700
+ }
701
+ .p-a-md {
702
+ padding: 30px !important;
703
+ }
704
+ .p-t-md {
705
+ padding-top: 30px !important;
706
+ }
707
+ .p-r-md {
708
+ padding-right: 30px !important;
709
+ }
710
+ .p-b-md {
711
+ padding-bottom: 30px !important;
712
+ }
713
+ .p-l-md {
714
+ padding-left: 30px !important;
715
+ }
716
+ .p-x-md {
717
+ padding-right: 30px !important;
718
+ padding-left: 30px !important;
719
+ }
720
+ .p-y-md {
721
+ padding-top: 30px !important;
722
+ padding-bottom: 30px !important;
723
+ }
724
+ .p-a-lg {
725
+ padding: 60px !important;
726
+ }
727
+ .p-t-lg {
728
+ padding-top: 60px !important;
729
+ }
730
+ .p-r-lg {
731
+ padding-right: 60px !important;
732
+ }
733
+ .p-b-lg {
734
+ padding-bottom: 60px !important;
735
+ }
736
+ .p-l-lg {
737
+ padding-left: 60px !important;
738
+ }
739
+ .p-x-lg {
740
+ padding-right: 60px !important;
741
+ padding-left: 60px !important;
742
+ }
743
+ .p-y-lg {
744
+ padding-top: 60px !important;
745
+ padding-bottom: 60px !important;
746
+ }
747
+
558
748
  body > header {
559
749
  position: relative;
560
750
  overflow: hidden;
@@ -1,5 +1,3 @@
1
- // in blog "about us" reinschieben
2
-
3
1
  .freundeskreis
4
2
  background: white
5
3
 
@@ -8,3 +8,4 @@ $site_baseurl: "{{ site.baseurl }}"
8
8
  @import "jobs"
9
9
  @import "news"
10
10
  @import "team"
11
+ @import "freundeskreis"
@@ -0,0 +1,66 @@
1
+ (function() {
2
+ function displaySearchResults(results, store) {
3
+ var searchResults = document.getElementById('search-results');
4
+
5
+ if (results.length) { // Are there any results?
6
+ var appendString = '';
7
+
8
+ for (var i = 0; i < results.length; i++) { // Iterate over the results
9
+ var item = store[results[i].ref];
10
+ appendString += '<li><a href="' + item.url + '"><h3>' + item.title + '</h3></a>';
11
+ appendString += '<p>' + item.content.substring(0, 150) + '...</p></li>';
12
+ }
13
+
14
+ searchResults.innerHTML = appendString;
15
+ } else {
16
+ searchResults.innerHTML = '<li>No results found</li>';
17
+ }
18
+ }
19
+
20
+ function getQueryVariable(variable) {
21
+ var query = window.location.search.substring(1);
22
+ var vars = query.split('&');
23
+
24
+ for (var i = 0; i < vars.length; i++) {
25
+ var pair = vars[i].split('=');
26
+
27
+ if (pair[0] === variable) {
28
+ return decodeURIComponent(pair[1].replace(/\+/g, '%20'));
29
+ }
30
+ }
31
+ }
32
+
33
+ var searchTerm = getQueryVariable('s');
34
+
35
+ if (searchTerm) {
36
+ document.getElementById('search-box').setAttribute("value", searchTerm);
37
+
38
+ // Initalize lunr with the fields it will be searching on. I've given title
39
+ // a boost of 10 to indicate matches on this field are more important.
40
+ var idx = lunr(function () {
41
+ this.field('id');
42
+ this.field('title', { boost: 10 });
43
+ this.field('author');
44
+ this.field('categories');
45
+ this.field('tags');
46
+ this.field('content');
47
+
48
+ for (var key in window.store) { // Add the data to lunr
49
+ this.add({
50
+ 'id': key,
51
+ 'title': window.store[key].title,
52
+ 'author': window.store[key].author,
53
+ 'categories': window.store[key].categories,
54
+ 'tags': window.store[key].tags,
55
+ 'content': window.store[key].content
56
+ });
57
+
58
+ }
59
+
60
+ });
61
+
62
+ var results = idx.search(searchTerm); // Get lunr to perform a search
63
+ displaySearchResults(results, window.store); // We'll write this in the next section
64
+
65
+ }
66
+ })();
data/assets/js/lunr.js ADDED
@@ -0,0 +1,2923 @@
1
+ /**
2
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.1.3
3
+ * Copyright (C) 2017 Oliver Nightingale
4
+ * @license MIT
5
+ */
6
+
7
+ ;(function(){
8
+
9
+ /**
10
+ * A convenience function for configuring and constructing
11
+ * a new lunr Index.
12
+ *
13
+ * A lunr.Builder instance is created and the pipeline setup
14
+ * with a trimmer, stop word filter and stemmer.
15
+ *
16
+ * This builder object is yielded to the configuration function
17
+ * that is passed as a parameter, allowing the list of fields
18
+ * and other builder parameters to be customised.
19
+ *
20
+ * All documents _must_ be added within the passed config function.
21
+ *
22
+ * @example
23
+ * var idx = lunr(function () {
24
+ * this.field('title')
25
+ * this.field('body')
26
+ * this.ref('id')
27
+ *
28
+ * documents.forEach(function (doc) {
29
+ * this.add(doc)
30
+ * }, this)
31
+ * })
32
+ *
33
+ * @see {@link lunr.Builder}
34
+ * @see {@link lunr.Pipeline}
35
+ * @see {@link lunr.trimmer}
36
+ * @see {@link lunr.stopWordFilter}
37
+ * @see {@link lunr.stemmer}
38
+ * @namespace {function} lunr
39
+ */
40
+ var lunr = function (config) {
41
+ var builder = new lunr.Builder
42
+
43
+ builder.pipeline.add(
44
+ lunr.trimmer,
45
+ lunr.stopWordFilter,
46
+ lunr.stemmer
47
+ )
48
+
49
+ builder.searchPipeline.add(
50
+ lunr.stemmer
51
+ )
52
+
53
+ config.call(builder, builder)
54
+ return builder.build()
55
+ }
56
+
57
+ lunr.version = "2.1.3"
58
+ /*!
59
+ * lunr.utils
60
+ * Copyright (C) 2017 Oliver Nightingale
61
+ */
62
+
63
+ /**
64
+ * A namespace containing utils for the rest of the lunr library
65
+ */
66
+ lunr.utils = {}
67
+
68
+ /**
69
+ * Print a warning message to the console.
70
+ *
71
+ * @param {String} message The message to be printed.
72
+ * @memberOf Utils
73
+ */
74
+ lunr.utils.warn = (function (global) {
75
+ /* eslint-disable no-console */
76
+ return function (message) {
77
+ if (global.console && console.warn) {
78
+ console.warn(message)
79
+ }
80
+ }
81
+ /* eslint-enable no-console */
82
+ })(this)
83
+
84
+ /**
85
+ * Convert an object to a string.
86
+ *
87
+ * In the case of `null` and `undefined` the function returns
88
+ * the empty string, in all other cases the result of calling
89
+ * `toString` on the passed object is returned.
90
+ *
91
+ * @param {Any} obj The object to convert to a string.
92
+ * @return {String} string representation of the passed object.
93
+ * @memberOf Utils
94
+ */
95
+ lunr.utils.asString = function (obj) {
96
+ if (obj === void 0 || obj === null) {
97
+ return ""
98
+ } else {
99
+ return obj.toString()
100
+ }
101
+ }
102
+ lunr.FieldRef = function (docRef, fieldName) {
103
+ this.docRef = docRef
104
+ this.fieldName = fieldName
105
+ this._stringValue = fieldName + lunr.FieldRef.joiner + docRef
106
+ }
107
+
108
+ lunr.FieldRef.joiner = "/"
109
+
110
+ lunr.FieldRef.fromString = function (s) {
111
+ var n = s.indexOf(lunr.FieldRef.joiner)
112
+
113
+ if (n === -1) {
114
+ throw "malformed field ref string"
115
+ }
116
+
117
+ var fieldRef = s.slice(0, n),
118
+ docRef = s.slice(n + 1)
119
+
120
+ return new lunr.FieldRef (docRef, fieldRef)
121
+ }
122
+
123
+ lunr.FieldRef.prototype.toString = function () {
124
+ return this._stringValue
125
+ }
126
+ /**
127
+ * A function to calculate the inverse document frequency for
128
+ * a posting. This is shared between the builder and the index
129
+ *
130
+ * @private
131
+ * @param {object} posting - The posting for a given term
132
+ * @param {number} documentCount - The total number of documents.
133
+ */
134
+ lunr.idf = function (posting, documentCount) {
135
+ var documentsWithTerm = 0
136
+
137
+ for (var fieldName in posting) {
138
+ if (fieldName == '_index') continue // Ignore the term index, its not a field
139
+ documentsWithTerm += Object.keys(posting[fieldName]).length
140
+ }
141
+
142
+ var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)
143
+
144
+ return Math.log(1 + Math.abs(x))
145
+ }
146
+
147
+ /**
148
+ * A token wraps a string representation of a token
149
+ * as it is passed through the text processing pipeline.
150
+ *
151
+ * @constructor
152
+ * @param {string} [str=''] - The string token being wrapped.
153
+ * @param {object} [metadata={}] - Metadata associated with this token.
154
+ */
155
+ lunr.Token = function (str, metadata) {
156
+ this.str = str || ""
157
+ this.metadata = metadata || {}
158
+ }
159
+
160
+ /**
161
+ * Returns the token string that is being wrapped by this object.
162
+ *
163
+ * @returns {string}
164
+ */
165
+ lunr.Token.prototype.toString = function () {
166
+ return this.str
167
+ }
168
+
169
+ /**
170
+ * A token update function is used when updating or optionally
171
+ * when cloning a token.
172
+ *
173
+ * @callback lunr.Token~updateFunction
174
+ * @param {string} str - The string representation of the token.
175
+ * @param {Object} metadata - All metadata associated with this token.
176
+ */
177
+
178
+ /**
179
+ * Applies the given function to the wrapped string token.
180
+ *
181
+ * @example
182
+ * token.update(function (str, metadata) {
183
+ * return str.toUpperCase()
184
+ * })
185
+ *
186
+ * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
187
+ * @returns {lunr.Token}
188
+ */
189
+ lunr.Token.prototype.update = function (fn) {
190
+ this.str = fn(this.str, this.metadata)
191
+ return this
192
+ }
193
+
194
+ /**
195
+ * Creates a clone of this token. Optionally a function can be
196
+ * applied to the cloned token.
197
+ *
198
+ * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
199
+ * @returns {lunr.Token}
200
+ */
201
+ lunr.Token.prototype.clone = function (fn) {
202
+ fn = fn || function (s) { return s }
203
+ return new lunr.Token (fn(this.str, this.metadata), this.metadata)
204
+ }
205
+ /*!
206
+ * lunr.tokenizer
207
+ * Copyright (C) 2017 Oliver Nightingale
208
+ */
209
+
210
+ /**
211
+ * A function for splitting a string into tokens ready to be inserted into
212
+ * the search index. Uses `lunr.tokenizer.separator` to split strings, change
213
+ * the value of this property to change how strings are split into tokens.
214
+ *
215
+ * This tokenizer will convert its parameter to a string by calling `toString` and
216
+ * then will split this string on the character in `lunr.tokenizer.separator`.
217
+ * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
218
+ *
219
+ * @static
220
+ * @param {?(string|object|object[])} obj - The object to convert into tokens
221
+ * @returns {lunr.Token[]}
222
+ */
223
+ lunr.tokenizer = function (obj) {
224
+ if (obj == null || obj == undefined) {
225
+ return []
226
+ }
227
+
228
+ if (Array.isArray(obj)) {
229
+ return obj.map(function (t) {
230
+ return new lunr.Token(lunr.utils.asString(t).toLowerCase())
231
+ })
232
+ }
233
+
234
+ var str = obj.toString().trim().toLowerCase(),
235
+ len = str.length,
236
+ tokens = []
237
+
238
+ for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
239
+ var char = str.charAt(sliceEnd),
240
+ sliceLength = sliceEnd - sliceStart
241
+
242
+ if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
243
+
244
+ if (sliceLength > 0) {
245
+ tokens.push(
246
+ new lunr.Token (str.slice(sliceStart, sliceEnd), {
247
+ position: [sliceStart, sliceLength],
248
+ index: tokens.length
249
+ })
250
+ )
251
+ }
252
+
253
+ sliceStart = sliceEnd + 1
254
+ }
255
+
256
+ }
257
+
258
+ return tokens
259
+ }
260
+
261
+ /**
262
+ * The separator used to split a string into tokens. Override this property to change the behaviour of
263
+ * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens.
264
+ *
265
+ * @static
266
+ * @see lunr.tokenizer
267
+ */
268
+ lunr.tokenizer.separator = /[\s\-]+/
269
+ /*!
270
+ * lunr.Pipeline
271
+ * Copyright (C) 2017 Oliver Nightingale
272
+ */
273
+
274
+ /**
275
+ * lunr.Pipelines maintain an ordered list of functions to be applied to all
276
+ * tokens in documents entering the search index and queries being ran against
277
+ * the index.
278
+ *
279
+ * An instance of lunr.Index created with the lunr shortcut will contain a
280
+ * pipeline with a stop word filter and an English language stemmer. Extra
281
+ * functions can be added before or after either of these functions or these
282
+ * default functions can be removed.
283
+ *
284
+ * When run the pipeline will call each function in turn, passing a token, the
285
+ * index of that token in the original list of all tokens and finally a list of
286
+ * all the original tokens.
287
+ *
288
+ * The output of functions in the pipeline will be passed to the next function
289
+ * in the pipeline. To exclude a token from entering the index the function
290
+ * should return undefined, the rest of the pipeline will not be called with
291
+ * this token.
292
+ *
293
+ * For serialisation of pipelines to work, all functions used in an instance of
294
+ * a pipeline should be registered with lunr.Pipeline. Registered functions can
295
+ * then be loaded. If trying to load a serialised pipeline that uses functions
296
+ * that are not registered an error will be thrown.
297
+ *
298
+ * If not planning on serialising the pipeline then registering pipeline functions
299
+ * is not necessary.
300
+ *
301
+ * @constructor
302
+ */
303
+ lunr.Pipeline = function () {
304
+ this._stack = []
305
+ }
306
+
307
+ lunr.Pipeline.registeredFunctions = Object.create(null)
308
+
309
+ /**
310
+ * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
311
+ * string as well as all known metadata. A pipeline function can mutate the token string
312
+ * or mutate (or add) metadata for a given token.
313
+ *
314
+ * A pipeline function can indicate that the passed token should be discarded by returning
315
+ * null. This token will not be passed to any downstream pipeline functions and will not be
316
+ * added to the index.
317
+ *
318
+ * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
319
+ * to any downstream pipeline functions and all will returned tokens will be added to the index.
320
+ *
321
+ * Any number of pipeline functions may be chained together using a lunr.Pipeline.
322
+ *
323
+ * @interface lunr.PipelineFunction
324
+ * @param {lunr.Token} token - A token from the document being processed.
325
+ * @param {number} i - The index of this token in the complete list of tokens for this document/field.
326
+ * @param {lunr.Token[]} tokens - All tokens for this document/field.
327
+ * @returns {(?lunr.Token|lunr.Token[])}
328
+ */
329
+
330
+ /**
331
+ * Register a function with the pipeline.
332
+ *
333
+ * Functions that are used in the pipeline should be registered if the pipeline
334
+ * needs to be serialised, or a serialised pipeline needs to be loaded.
335
+ *
336
+ * Registering a function does not add it to a pipeline, functions must still be
337
+ * added to instances of the pipeline for them to be used when running a pipeline.
338
+ *
339
+ * @param {lunr.PipelineFunction} fn - The function to check for.
340
+ * @param {String} label - The label to register this function with
341
+ */
342
+ lunr.Pipeline.registerFunction = function (fn, label) {
343
+ if (label in this.registeredFunctions) {
344
+ lunr.utils.warn('Overwriting existing registered function: ' + label)
345
+ }
346
+
347
+ fn.label = label
348
+ lunr.Pipeline.registeredFunctions[fn.label] = fn
349
+ }
350
+
351
+ /**
352
+ * Warns if the function is not registered as a Pipeline function.
353
+ *
354
+ * @param {lunr.PipelineFunction} fn - The function to check for.
355
+ * @private
356
+ */
357
+ lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
358
+ var isRegistered = fn.label && (fn.label in this.registeredFunctions)
359
+
360
+ if (!isRegistered) {
361
+ lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
362
+ }
363
+ }
364
+
365
+ /**
366
+ * Loads a previously serialised pipeline.
367
+ *
368
+ * All functions to be loaded must already be registered with lunr.Pipeline.
369
+ * If any function from the serialised data has not been registered then an
370
+ * error will be thrown.
371
+ *
372
+ * @param {Object} serialised - The serialised pipeline to load.
373
+ * @returns {lunr.Pipeline}
374
+ */
375
+ lunr.Pipeline.load = function (serialised) {
376
+ var pipeline = new lunr.Pipeline
377
+
378
+ serialised.forEach(function (fnName) {
379
+ var fn = lunr.Pipeline.registeredFunctions[fnName]
380
+
381
+ if (fn) {
382
+ pipeline.add(fn)
383
+ } else {
384
+ throw new Error('Cannot load unregistered function: ' + fnName)
385
+ }
386
+ })
387
+
388
+ return pipeline
389
+ }
390
+
391
+ /**
392
+ * Adds new functions to the end of the pipeline.
393
+ *
394
+ * Logs a warning if the function has not been registered.
395
+ *
396
+ * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
397
+ */
398
+ lunr.Pipeline.prototype.add = function () {
399
+ var fns = Array.prototype.slice.call(arguments)
400
+
401
+ fns.forEach(function (fn) {
402
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
403
+ this._stack.push(fn)
404
+ }, this)
405
+ }
406
+
407
+ /**
408
+ * Adds a single function after a function that already exists in the
409
+ * pipeline.
410
+ *
411
+ * Logs a warning if the function has not been registered.
412
+ *
413
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
414
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
415
+ */
416
+ lunr.Pipeline.prototype.after = function (existingFn, newFn) {
417
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
418
+
419
+ var pos = this._stack.indexOf(existingFn)
420
+ if (pos == -1) {
421
+ throw new Error('Cannot find existingFn')
422
+ }
423
+
424
+ pos = pos + 1
425
+ this._stack.splice(pos, 0, newFn)
426
+ }
427
+
428
+ /**
429
+ * Adds a single function before a function that already exists in the
430
+ * pipeline.
431
+ *
432
+ * Logs a warning if the function has not been registered.
433
+ *
434
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
435
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
436
+ */
437
+ lunr.Pipeline.prototype.before = function (existingFn, newFn) {
438
+ lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
439
+
440
+ var pos = this._stack.indexOf(existingFn)
441
+ if (pos == -1) {
442
+ throw new Error('Cannot find existingFn')
443
+ }
444
+
445
+ this._stack.splice(pos, 0, newFn)
446
+ }
447
+
448
+ /**
449
+ * Removes a function from the pipeline.
450
+ *
451
+ * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
452
+ */
453
+ lunr.Pipeline.prototype.remove = function (fn) {
454
+ var pos = this._stack.indexOf(fn)
455
+ if (pos == -1) {
456
+ return
457
+ }
458
+
459
+ this._stack.splice(pos, 1)
460
+ }
461
+
462
+ /**
463
+ * Runs the current list of functions that make up the pipeline against the
464
+ * passed tokens.
465
+ *
466
+ * @param {Array} tokens The tokens to run through the pipeline.
467
+ * @returns {Array}
468
+ */
469
+ lunr.Pipeline.prototype.run = function (tokens) {
470
+ var stackLength = this._stack.length
471
+
472
+ for (var i = 0; i < stackLength; i++) {
473
+ var fn = this._stack[i]
474
+
475
+ tokens = tokens.reduce(function (memo, token, j) {
476
+ var result = fn(token, j, tokens)
477
+
478
+ if (result === void 0 || result === '') return memo
479
+
480
+ return memo.concat(result)
481
+ }, [])
482
+ }
483
+
484
+ return tokens
485
+ }
486
+
487
+ /**
488
+ * Convenience method for passing a string through a pipeline and getting
489
+ * strings out. This method takes care of wrapping the passed string in a
490
+ * token and mapping the resulting tokens back to strings.
491
+ *
492
+ * @param {string} str - The string to pass through the pipeline.
493
+ * @returns {string[]}
494
+ */
495
+ lunr.Pipeline.prototype.runString = function (str) {
496
+ var token = new lunr.Token (str)
497
+
498
+ return this.run([token]).map(function (t) {
499
+ return t.toString()
500
+ })
501
+ }
502
+
503
+ /**
504
+ * Resets the pipeline by removing any existing processors.
505
+ *
506
+ */
507
+ lunr.Pipeline.prototype.reset = function () {
508
+ this._stack = []
509
+ }
510
+
511
+ /**
512
+ * Returns a representation of the pipeline ready for serialisation.
513
+ *
514
+ * Logs a warning if the function has not been registered.
515
+ *
516
+ * @returns {Array}
517
+ */
518
+ lunr.Pipeline.prototype.toJSON = function () {
519
+ return this._stack.map(function (fn) {
520
+ lunr.Pipeline.warnIfFunctionNotRegistered(fn)
521
+
522
+ return fn.label
523
+ })
524
+ }
525
+ /*!
526
+ * lunr.Vector
527
+ * Copyright (C) 2017 Oliver Nightingale
528
+ */
529
+
530
+ /**
531
+ * A vector is used to construct the vector space of documents and queries. These
532
+ * vectors support operations to determine the similarity between two documents or
533
+ * a document and a query.
534
+ *
535
+ * Normally no parameters are required for initializing a vector, but in the case of
536
+ * loading a previously dumped vector the raw elements can be provided to the constructor.
537
+ *
538
+ * For performance reasons vectors are implemented with a flat array, where an elements
539
+ * index is immediately followed by its value. E.g. [index, value, index, value]. This
540
+ * allows the underlying array to be as sparse as possible and still offer decent
541
+ * performance when being used for vector calculations.
542
+ *
543
+ * @constructor
544
+ * @param {Number[]} [elements] - The flat list of element index and element value pairs.
545
+ */
546
+ lunr.Vector = function (elements) {
547
+ this._magnitude = 0
548
+ this.elements = elements || []
549
+ }
550
+
551
+
552
+ /**
553
+ * Calculates the position within the vector to insert a given index.
554
+ *
555
+ * This is used internally by insert and upsert. If there are duplicate indexes then
556
+ * the position is returned as if the value for that index were to be updated, but it
557
+ * is the callers responsibility to check whether there is a duplicate at that index
558
+ *
559
+ * @param {Number} insertIdx - The index at which the element should be inserted.
560
+ * @returns {Number}
561
+ */
562
+ lunr.Vector.prototype.positionForIndex = function (index) {
563
+ // For an empty vector the tuple can be inserted at the beginning
564
+ if (this.elements.length == 0) {
565
+ return 0
566
+ }
567
+
568
+ var start = 0,
569
+ end = this.elements.length / 2,
570
+ sliceLength = end - start,
571
+ pivotPoint = Math.floor(sliceLength / 2),
572
+ pivotIndex = this.elements[pivotPoint * 2]
573
+
574
+ while (sliceLength > 1) {
575
+ if (pivotIndex < index) {
576
+ start = pivotPoint
577
+ }
578
+
579
+ if (pivotIndex > index) {
580
+ end = pivotPoint
581
+ }
582
+
583
+ if (pivotIndex == index) {
584
+ break
585
+ }
586
+
587
+ sliceLength = end - start
588
+ pivotPoint = start + Math.floor(sliceLength / 2)
589
+ pivotIndex = this.elements[pivotPoint * 2]
590
+ }
591
+
592
+ if (pivotIndex == index) {
593
+ return pivotPoint * 2
594
+ }
595
+
596
+ if (pivotIndex > index) {
597
+ return pivotPoint * 2
598
+ }
599
+
600
+ if (pivotIndex < index) {
601
+ return (pivotPoint + 1) * 2
602
+ }
603
+ }
604
+
605
+ /**
606
+ * Inserts an element at an index within the vector.
607
+ *
608
+ * Does not allow duplicates, will throw an error if there is already an entry
609
+ * for this index.
610
+ *
611
+ * @param {Number} insertIdx - The index at which the element should be inserted.
612
+ * @param {Number} val - The value to be inserted into the vector.
613
+ */
614
+ lunr.Vector.prototype.insert = function (insertIdx, val) {
615
+ this.upsert(insertIdx, val, function () {
616
+ throw "duplicate index"
617
+ })
618
+ }
619
+
620
/**
 * Inserts or updates an existing index within the vector.
 *
 * @param {Number} insertIdx - The index at which the element should be inserted.
 * @param {Number} val - The value to be inserted into the vector.
 * @param {function} fn - Called for updates; receives the existing value and
 * the requested value and returns the value to store.
 */
lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
  // Any cached magnitude is stale once the elements change.
  this._magnitude = 0

  var position = this.positionForIndex(insertIdx)
  var alreadyPresent = this.elements[position] == insertIdx

  if (alreadyPresent) {
    this.elements[position + 1] = fn(this.elements[position + 1], val)
  } else {
    this.elements.splice(position, 0, insertIdx, val)
  }
}
638
+
639
/**
 * Calculates (and caches) the magnitude of this vector.
 *
 * @returns {Number}
 */
lunr.Vector.prototype.magnitude = function () {
  // Serve the cached value when available (0 doubles as "not yet computed").
  if (this._magnitude) {
    return this._magnitude
  }

  var sumOfSquares = 0

  // Values live at the odd positions of the flat [index, value, ...] array.
  for (var i = 1, len = this.elements.length; i < len; i += 2) {
    var value = this.elements[i]
    sumOfSquares += value * value
  }

  this._magnitude = Math.sqrt(sumOfSquares)
  return this._magnitude
}
657
+
658
/**
 * Calculates the dot product of this vector and another vector.
 *
 * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
 * @returns {Number}
 */
lunr.Vector.prototype.dot = function (otherVector) {
  var a = this.elements,
      b = otherVector.elements,
      i = 0,
      j = 0,
      product = 0

  // Merge-style walk over both sorted [index, value, ...] arrays; a value
  // contributes only when the same index appears in both vectors.
  while (i < a.length && j < b.length) {
    var aIdx = a[i]
    var bIdx = b[j]

    if (aIdx < bIdx) {
      i += 2
    } else if (aIdx > bIdx) {
      j += 2
    } else {
      product += a[i + 1] * b[j + 1]
      i += 2
      j += 2
    }
  }

  return product
}
686
+
687
/**
 * Calculates the cosine similarity between this vector and another
 * vector.
 *
 * @param {lunr.Vector} otherVector - The other vector to calculate the
 * similarity with.
 * @returns {Number}
 */
lunr.Vector.prototype.similarity = function (otherVector) {
  var magnitudeProduct = this.magnitude() * otherVector.magnitude()
  return this.dot(otherVector) / magnitudeProduct
}
698
+
699
/**
 * Converts the vector to an array of the values within the vector.
 *
 * @returns {Number[]}
 */
lunr.Vector.prototype.toArray = function () {
  var output = new Array (this.elements.length / 2)

  // Copy every value (the odd slots) out of the flat pair array.
  for (var j = 0; j < output.length; j++) {
    output[j] = this.elements[j * 2 + 1]
  }

  return output
}
713
+
714
/**
 * A JSON serializable representation of the vector.
 *
 * @returns {Number[]}
 */
lunr.Vector.prototype.toJSON = function () {
  // The flat [index, value, ...] array is already JSON friendly.
  return this.elements
}
722
/* eslint-disable */
/*!
 * lunr.stemmer
 * Copyright (C) 2017 Oliver Nightingale
 * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
 */

/**
 * lunr.stemmer is an english language stemmer, this is a JavaScript
 * implementation of the PorterStemmer taken from http://tartarus.org/~martin
 *
 * @static
 * @implements {lunr.PipelineFunction}
 * @param {lunr.Token} token - The string to stem
 * @returns {lunr.Token}
 * @see {@link lunr.Pipeline}
 */
lunr.stemmer = (function(){
  // Step 2 suffix -> replacement mappings of the Porter algorithm.
  var step2list = {
      "ational" : "ate",
      "tional" : "tion",
      "enci" : "ence",
      "anci" : "ance",
      "izer" : "ize",
      "bli" : "ble",
      "alli" : "al",
      "entli" : "ent",
      "eli" : "e",
      "ousli" : "ous",
      "ization" : "ize",
      "ation" : "ate",
      "ator" : "ate",
      "alism" : "al",
      "iveness" : "ive",
      "fulness" : "ful",
      "ousness" : "ous",
      "aliti" : "al",
      "iviti" : "ive",
      "biliti" : "ble",
      "logi" : "log"
    },

    // Step 3 suffix -> replacement mappings.
    step3list = {
      "icate" : "ic",
      "ative" : "",
      "alize" : "al",
      "iciti" : "ic",
      "ical" : "ic",
      "ful" : "",
      "ness" : ""
    },

    c = "[^aeiou]",          // consonant
    v = "[aeiouy]",          // vowel
    C = c + "[^aeiouy]*",    // consonant sequence
    V = v + "[aeiou]*",      // vowel sequence

    mgr0 = "^(" + C + ")?" + V + C,               // [C]VC... is m>0
    meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$",  // [C]VC[V] is m=1
    mgr1 = "^(" + C + ")?" + V + C + V + C,       // [C]VCVC... is m>1
    s_v = "^(" + C + ")?" + v;                    // vowel in stem

  // Precompiled "measure" tests reused across steps.
  var re_mgr0 = new RegExp(mgr0);
  var re_mgr1 = new RegExp(mgr1);
  var re_meq1 = new RegExp(meq1);
  var re_s_v = new RegExp(s_v);

  // Per-step suffix patterns.
  var re_1a = /^(.+?)(ss|i)es$/;
  var re2_1a = /^(.+?)([^s])s$/;
  var re_1b = /^(.+?)eed$/;
  var re2_1b = /^(.+?)(ed|ing)$/;
  var re_1b_2 = /.$/;
  var re2_1b_2 = /(at|bl|iz)$/;
  var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
  var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");

  var re_1c = /^(.+?[^aeiou])y$/;
  var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;

  var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;

  var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
  var re2_4 = /^(.+?)(s|t)(ion)$/;

  var re_5 = /^(.+?)e$/;
  var re_5_1 = /ll$/;
  var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");

  // The core stemmer: takes a plain string and returns its stem.
  var porterStemmer = function porterStemmer(w) {
    var stem,
      suffix,
      firstch,
      re,
      re2,
      re3,
      re4;

    // Words shorter than three characters are left untouched.
    if (w.length < 3) { return w; }

    // Temporarily capitalise a leading "y" so it is treated as a consonant.
    firstch = w.substr(0,1);
    if (firstch == "y") {
      w = firstch.toUpperCase() + w.substr(1);
    }

    // Step 1a
    re = re_1a
    re2 = re2_1a;

    if (re.test(w)) { w = w.replace(re,"$1$2"); }
    else if (re2.test(w)) { w = w.replace(re2,"$1$2"); }

    // Step 1b
    re = re_1b;
    re2 = re2_1b;
    if (re.test(w)) {
      var fp = re.exec(w);
      re = re_mgr0;
      if (re.test(fp[1])) {
        re = re_1b_2;
        w = w.replace(re,"");
      }
    } else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1];
      re2 = re_s_v;
      if (re2.test(stem)) {
        w = stem;
        re2 = re2_1b_2;
        re3 = re3_1b_2;
        re4 = re4_1b_2;
        if (re2.test(w)) { w = w + "e"; }
        else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); }
        else if (re4.test(w)) { w = w + "e"; }
      }
    }

    // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
    re = re_1c;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      w = stem + "i";
    }

    // Step 2
    re = re_2;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = re_mgr0;
      if (re.test(stem)) {
        w = stem + step2list[suffix];
      }
    }

    // Step 3
    re = re_3;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = re_mgr0;
      if (re.test(stem)) {
        w = stem + step3list[suffix];
      }
    }

    // Step 4
    re = re_4;
    re2 = re2_4;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = re_mgr1;
      if (re.test(stem)) {
        w = stem;
      }
    } else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1] + fp[2];
      re2 = re_mgr1;
      if (re2.test(stem)) {
        w = stem;
      }
    }

    // Step 5
    re = re_5;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = re_mgr1;
      re2 = re_meq1;
      re3 = re3_5;
      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
        w = stem;
      }
    }

    re = re_5_1;
    re2 = re_mgr1;
    if (re.test(w) && re2.test(w)) {
      re = re_1b_2;
      w = w.replace(re,"");
    }

    // and turn initial Y back to y

    if (firstch == "y") {
      w = firstch.toLowerCase() + w.substr(1);
    }

    return w;
  };

  // Adapt the string stemmer to the pipeline's Token interface.
  return function (token) {
    return token.update(porterStemmer);
  }
})();

lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
944
/*!
 * lunr.stopWordFilter
 * Copyright (C) 2017 Oliver Nightingale
 */

/**
 * lunr.generateStopWordFilter builds a stopWordFilter function from the provided
 * list of stop words.
 *
 * The built in lunr.stopWordFilter is built using this generator and can be used
 * to generate custom stopWordFilters for applications or non English languages.
 *
 * @param {Array} stopWords - The list of stop words to filter out.
 * @returns {lunr.PipelineFunction}
 * @see lunr.Pipeline
 * @see lunr.stopWordFilter
 */
lunr.generateStopWordFilter = function (stopWords) {
  // Build a lookup table mapping each stop word to itself.
  var words = {}
  for (var i = 0; i < stopWords.length; i++) {
    words[stopWords[i]] = stopWords[i]
  }

  // Tokens found in the table are dropped (undefined is returned),
  // everything else passes through unchanged.
  return function (token) {
    if (token && words[token.toString()] !== token.toString()) {
      return token
    }
  }
}
971
+
972
/**
 * lunr.stopWordFilter is an English language stop word list filter, any words
 * contained in the list will not be passed through the filter.
 *
 * This is intended to be used in the Pipeline. If the token does not pass the
 * filter then undefined will be returned.
 *
 * @implements {lunr.PipelineFunction}
 * @params {lunr.Token} token - A token to check for being a stop word.
 * @returns {lunr.Token}
 * @see {@link lunr.Pipeline}
 */
lunr.stopWordFilter = lunr.generateStopWordFilter([
  'a', 'able', 'about', 'across', 'after', 'all', 'almost', 'also', 'am',
  'among', 'an', 'and', 'any', 'are', 'as', 'at', 'be', 'because', 'been',
  'but', 'by', 'can', 'cannot', 'could', 'dear', 'did', 'do', 'does',
  'either', 'else', 'ever', 'every', 'for', 'from', 'get', 'got', 'had',
  'has', 'have', 'he', 'her', 'hers', 'him', 'his', 'how', 'however', 'i',
  'if', 'in', 'into', 'is', 'it', 'its', 'just', 'least', 'let', 'like',
  'likely', 'may', 'me', 'might', 'most', 'must', 'my', 'neither', 'no',
  'nor', 'not', 'of', 'off', 'often', 'on', 'only', 'or', 'other', 'our',
  'own', 'rather', 'said', 'say', 'says', 'she', 'should', 'since', 'so',
  'some', 'than', 'that', 'the', 'their', 'them', 'then', 'there', 'these',
  'they', 'this', 'tis', 'to', 'too', 'twas', 'us', 'wants', 'was', 'we',
  'were', 'what', 'when', 'where', 'which', 'while', 'who', 'whom', 'why',
  'will', 'with', 'would', 'yet', 'you', 'your'
])

lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
1107
/*!
 * lunr.trimmer
 * Copyright (C) 2017 Oliver Nightingale
 */

/**
 * lunr.trimmer is a pipeline function for trimming non word
 * characters from the beginning and end of tokens before they
 * enter the index.
 *
 * This implementation may not work correctly for non latin
 * characters and should either be removed or adapted for use
 * with languages with non-latin characters.
 *
 * @static
 * @implements {lunr.PipelineFunction}
 * @param {lunr.Token} token The token to pass through the filter
 * @returns {lunr.Token}
 * @see lunr.Pipeline
 */
lunr.trimmer = function (token) {
  // Strip a leading run, then a trailing run, of non-word characters.
  var stripEdges = function (s) {
    return s.replace(/^\W+/, '').replace(/\W+$/, '')
  }

  return token.update(stripEdges)
}

lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
1134
/*!
 * lunr.TokenSet
 * Copyright (C) 2017 Oliver Nightingale
 */

/**
 * A token set stores the unique list of all tokens within an index.
 * Token sets also represent an incoming query: the query token set and
 * the index token set are intersected to find which tokens to look up
 * in the inverted index.
 *
 * A token set can hold many tokens, as with the index token set, or a
 * single token, as with a simple query token set.
 *
 * Token sets also support wildcard matching: leading, contained and
 * trailing wildcards are all handled, and edit-distance matching is
 * built on top of this.
 *
 * Token sets are implemented as a minimal finite state automaton where
 * common prefixes and suffixes are shared between tokens, reducing the
 * space needed to store the set.
 *
 * @constructor
 */
lunr.TokenSet = function () {
  this.final = false
  this.edges = {}
  // Assign the next auto-incremented id; ids must be unique for
  // minimisation to work (see #toString).
  this.id = lunr.TokenSet._nextId++
}

/**
 * Keeps track of the next, auto increment, identifier to assign
 * to a new tokenSet.
 *
 * TokenSets require a unique identifier to be correctly minimised.
 *
 * @private
 */
lunr.TokenSet._nextId = 1
1176
+
1177
/**
 * Creates a TokenSet instance from the given sorted array of words.
 *
 * @param {String[]} arr - A sorted array of strings to create the set from.
 * @returns {lunr.TokenSet}
 * @throws Will throw an error if the input array is not sorted.
 */
lunr.TokenSet.fromArray = function (arr) {
  var builder = new lunr.TokenSet.Builder

  // Feed every word to the builder; insertion order must be sorted.
  arr.forEach(function (word) {
    builder.insert(word)
  })

  builder.finish()
  return builder.root
}
1194
+
1195
/**
 * Creates a token set from a query clause.
 *
 * @private
 * @param {Object} clause - A single clause from lunr.Query.
 * @param {string} clause.term - The query clause term.
 * @param {number} [clause.editDistance] - The optional edit distance for the term.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromClause = function (clause) {
  // A present editDistance switches to fuzzy matching.
  if ('editDistance' in clause) {
    return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
  }

  return lunr.TokenSet.fromString(clause.term)
}
1211
+
1212
/**
 * Creates a token set representing a single string with a specified
 * edit distance.
 *
 * Insertions, deletions, substitutions and transpositions are each
 * treated as an edit distance of 1.
 *
 * Increasing the allowed edit distance will have a dramatic impact
 * on the performance of both creating and intersecting these TokenSets.
 * It is advised to keep the edit distance less than 3.
 *
 * @param {string} str - The string to create the token set from.
 * @param {number} editDistance - The allowed edit distance to match.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
  var root = new lunr.TokenSet

  // Explicit work stack: each frame tracks the automaton node reached so
  // far, the edits still allowed, and the remaining suffix of the string.
  var stack = [{
    node: root,
    editsRemaining: editDistance,
    str: str
  }]

  while (stack.length) {
    var frame = stack.pop()

    // no edit
    if (frame.str.length > 0) {
      var char = frame.str.charAt(0),
          noEditNode

      if (char in frame.node.edges) {
        noEditNode = frame.node.edges[char]
      } else {
        noEditNode = new lunr.TokenSet
        frame.node.edges[char] = noEditNode
      }

      if (frame.str.length == 1) {
        noEditNode.final = true
      } else {
        stack.push({
          node: noEditNode,
          editsRemaining: frame.editsRemaining,
          str: frame.str.slice(1)
        })
      }
    }

    // deletion
    // can only do a deletion if we have enough edits remaining
    // and if there are characters left to delete in the string
    if (frame.editsRemaining > 0 && frame.str.length > 1) {
      var char = frame.str.charAt(1),
          deletionNode

      if (char in frame.node.edges) {
        deletionNode = frame.node.edges[char]
      } else {
        deletionNode = new lunr.TokenSet
        frame.node.edges[char] = deletionNode
      }

      if (frame.str.length <= 2) {
        deletionNode.final = true
      } else {
        stack.push({
          node: deletionNode,
          editsRemaining: frame.editsRemaining - 1,
          str: frame.str.slice(2)
        })
      }
    }

    // deletion
    // just removing the last character from the str
    if (frame.editsRemaining > 0 && frame.str.length == 1) {
      frame.node.final = true
    }

    // substitution
    // can only do a substitution if we have enough edits remaining
    // and if there are characters left to substitute
    // NOTE: substitution and insertion share the "*" wildcard edge.
    if (frame.editsRemaining > 0 && frame.str.length >= 1) {
      if ("*" in frame.node.edges) {
        var substitutionNode = frame.node.edges["*"]
      } else {
        var substitutionNode = new lunr.TokenSet
        frame.node.edges["*"] = substitutionNode
      }

      if (frame.str.length == 1) {
        substitutionNode.final = true
      } else {
        stack.push({
          node: substitutionNode,
          editsRemaining: frame.editsRemaining - 1,
          str: frame.str.slice(1)
        })
      }
    }

    // insertion
    // can only do insertion if there are edits remaining
    if (frame.editsRemaining > 0) {
      if ("*" in frame.node.edges) {
        var insertionNode = frame.node.edges["*"]
      } else {
        var insertionNode = new lunr.TokenSet
        frame.node.edges["*"] = insertionNode
      }

      if (frame.str.length == 0) {
        insertionNode.final = true
      } else {
        stack.push({
          node: insertionNode,
          editsRemaining: frame.editsRemaining - 1,
          str: frame.str
        })
      }
    }

    // transposition
    // can only do a transposition if there are edits remaining
    // and there are enough characters to transpose
    if (frame.editsRemaining > 0 && frame.str.length > 1) {
      var charA = frame.str.charAt(0),
          charB = frame.str.charAt(1),
          transposeNode

      if (charB in frame.node.edges) {
        transposeNode = frame.node.edges[charB]
      } else {
        transposeNode = new lunr.TokenSet
        frame.node.edges[charB] = transposeNode
      }

      if (frame.str.length == 1) {
        transposeNode.final = true
      } else {
        stack.push({
          node: transposeNode,
          editsRemaining: frame.editsRemaining - 1,
          str: charA + frame.str.slice(2)
        })
      }
    }
  }

  return root
}
1365
+
1366
/**
 * Creates a TokenSet from a string.
 *
 * The string may contain one or more wildcard characters (*)
 * that will allow wildcard matching when intersecting with
 * another TokenSet.
 *
 * @param {string} str - The string to create a TokenSet from.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.fromString = function (str) {
  var root = new lunr.TokenSet
  var cursor = root
  var wildcardFound = false

  /*
   * Appends one node per character. As soon as a wildcard character
   * is seen, a self referencing edge is introduced so the node keeps
   * matching any number of any characters.
   */
  for (var i = 0, len = str.length; i < len; i++) {
    var char = str[i]
    var isLast = (i == len - 1)

    if (char == "*") {
      wildcardFound = true
      cursor.edges[char] = cursor
      cursor.final = isLast
    } else {
      var next = new lunr.TokenSet
      next.final = isLast

      cursor.edges[char] = next
      cursor = next

      // TODO: is this needed anymore?
      if (wildcardFound) {
        cursor.edges["*"] = root
      }
    }
  }

  return root
}
1414
+
1415
/**
 * Converts this TokenSet into an array of strings
 * contained within the TokenSet.
 *
 * @returns {string[]}
 */
lunr.TokenSet.prototype.toArray = function () {
  var words = []

  // Depth-first walk of the automaton, building the prefix on the way down.
  var stack = [{
    prefix: "",
    node: this
  }]

  while (stack.length) {
    var frame = stack.pop()

    // A final node marks the end of a complete word.
    if (frame.node.final) {
      words.push(frame.prefix)
    }

    var labels = Object.keys(frame.node.edges)

    for (var i = 0; i < labels.length; i++) {
      var label = labels[i]

      stack.push({
        prefix: frame.prefix + label,
        node: frame.node.edges[label]
      })
    }
  }

  return words
}
1450
+
1451
/**
 * Generates a string representation of a TokenSet.
 *
 * This is intended to allow TokenSets to be used as keys
 * in objects, largely to aid the construction and minimisation
 * of a TokenSet. As such it is not designed to be a human
 * friendly representation of the TokenSet.
 *
 * @returns {string}
 */
lunr.TokenSet.prototype.toString = function () {
  // NOTE: Using Object.keys here as this.edges is very likely
  // to enter 'hash-mode' with many keys being added
  //
  // avoiding a for-in loop here as it leads to the function
  // being de-optimised (at least in V8). From some simple
  // benchmarks the performance is comparable, but allowing
  // V8 to optimize may mean easy performance wins in the future.

  // _str is the cached key, set once the node is known to be immutable.
  if (this._str) {
    return this._str
  }

  var str = this.final ? '1' : '0'
  var labels = Object.keys(this.edges).sort()

  // Append each outgoing edge label together with the target node's id.
  for (var i = 0; i < labels.length; i++) {
    var label = labels[i]
    str += label + this.edges[label].id
  }

  return str
}
1487
+
1488
/**
 * Returns a new TokenSet that is the intersection of
 * this TokenSet and the passed TokenSet.
 *
 * This intersection will take into account any wildcards
 * contained within the TokenSet.
 *
 * @param {lunr.TokenSet} b - An other TokenSet to intersect with.
 * @returns {lunr.TokenSet}
 */
lunr.TokenSet.prototype.intersect = function (b) {
  var output = new lunr.TokenSet,
      frame = undefined

  // Walk both automata in lock step; each frame pairs a node from this
  // set, a node from the query set, and the output node built so far.
  var stack = [{
    qNode: b,
    output: output,
    node: this
  }]

  while (stack.length) {
    frame = stack.pop()

    // NOTE: As with the #toString method, we are using
    // Object.keys and a for loop instead of a for-in loop
    // as both of these objects enter 'hash' mode, causing
    // the function to be de-optimised in V8
    var qEdges = Object.keys(frame.qNode.edges),
        qLen = qEdges.length,
        nEdges = Object.keys(frame.node.edges),
        nLen = nEdges.length

    for (var q = 0; q < qLen; q++) {
      var qEdge = qEdges[q]

      for (var n = 0; n < nLen; n++) {
        var nEdge = nEdges[n]

        // Edges match on the exact character, or on a query-side wildcard.
        if (nEdge == qEdge || qEdge == '*') {
          var node = frame.node.edges[nEdge],
              qNode = frame.qNode.edges[qEdge],
              final = node.final && qNode.final,
              next = undefined

          if (nEdge in frame.output.edges) {
            // an edge already exists for this character
            // no need to create a new node, just set the finality
            // bit unless this node is already final
            next = frame.output.edges[nEdge]
            next.final = next.final || final

          } else {
            // no edge exists yet, must create one
            // set the finality bit and insert it
            // into the output
            next = new lunr.TokenSet
            next.final = final
            frame.output.edges[nEdge] = next
          }

          stack.push({
            qNode: qNode,
            output: next,
            node: node
          })
        }
      }
    }
  }

  return output
}
1560
/**
 * Incrementally builds a minimal TokenSet automaton from words
 * inserted in sorted order.
 *
 * @constructor
 */
lunr.TokenSet.Builder = function () {
  this.previousWord = ""
  this.root = new lunr.TokenSet
  this.uncheckedNodes = []   // nodes not yet eligible for minimisation
  this.minimizedNodes = {}   // canonical nodes keyed by their string form
}
1566
+
1567
/**
 * Inserts a word into the TokenSet being built.
 *
 * Words must be inserted in sorted order so that common prefixes can be
 * shared and finished branches can be minimised as soon as possible.
 *
 * @param {string} word - The word to insert; must not sort before any
 * previously inserted word.
 * @throws Will throw an error on out of order insertion.
 */
lunr.TokenSet.Builder.prototype.insert = function (word) {
  var node,
      commonPrefix = 0

  if (word < this.previousWord) {
    throw new Error ("Out of order word insertion")
  }

  // Count how many leading characters this word shares with the previous one.
  for (var i = 0; i < word.length && i < this.previousWord.length; i++) {
    if (word[i] != this.previousWord[i]) break
    commonPrefix++
  }

  // Anything past the shared prefix of the previous word can never be
  // extended again, so minimise it now.
  this.minimize(commonPrefix)

  if (this.uncheckedNodes.length == 0) {
    node = this.root
  } else {
    node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
  }

  // Append a fresh chain of nodes for the unshared suffix of the word.
  for (var i = commonPrefix; i < word.length; i++) {
    var nextNode = new lunr.TokenSet,
        char = word[i]

    node.edges[char] = nextNode

    this.uncheckedNodes.push({
      parent: node,
      char: char,
      child: nextNode
    })

    node = nextNode
  }

  node.final = true
  this.previousWord = word
}
1606
+
1607
/**
 * Finalises the build by minimising every remaining unchecked node.
 */
lunr.TokenSet.Builder.prototype.finish = function () {
  // Minimising down to zero flushes the whole unchecked stack.
  this.minimize(0)
}
1610
+
1611
/**
 * Minimises the automaton by replacing finished branches with previously
 * seen equivalent nodes.
 *
 * @param {number} downTo - Unchecked-stack depth to minimise down to.
 */
lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
  // Work from the deepest unchecked node back up towards downTo.
  for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
    var entry = this.uncheckedNodes[i]
    var key = entry.child.toString()

    if (key in this.minimizedNodes) {
      // An equivalent node already exists; point the parent at it.
      entry.parent.edges[entry.char] = this.minimizedNodes[key]
    } else {
      // Cache the key for this node since
      // we know it can't change anymore
      entry.child._str = key

      this.minimizedNodes[key] = entry.child
    }

    this.uncheckedNodes.pop()
  }
}
1629
/*!
 * lunr.Index
 * Copyright (C) 2017 Oliver Nightingale
 */

/**
 * An index contains the built index of all documents and provides a query interface
 * to the index.
 *
 * Usually instances of lunr.Index will not be created using this constructor, instead
 * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be
 * used to load previously built and serialized indexes.
 *
 * @constructor
 * @param {Object} attrs - The attributes of the built search index.
 * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
 * @param {Object<string, lunr.Vector>} attrs.fieldVectors - Field vectors keyed by document/field reference.
 * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens.
 * @param {string[]} attrs.fields - The names of indexed document fields.
 * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
 */
lunr.Index = function (attrs) {
  this.invertedIndex = attrs.invertedIndex
  this.fieldVectors = attrs.fieldVectors
  this.tokenSet = attrs.tokenSet
  this.fields = attrs.fields
  this.pipeline = attrs.pipeline
}
1657
+
1658
+ /**
1659
+ * A result contains details of a document matching a search query.
1660
+ * @typedef {Object} lunr.Index~Result
1661
+ * @property {string} ref - The reference of the document this result represents.
1662
+ * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.
1663
+ * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
1664
+ */
1665
+
1666
+ /**
1667
+ * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
1668
+ * query language which itself is parsed into an instance of lunr.Query.
1669
+ *
1670
+ * For programmatically building queries it is advised to directly use lunr.Query, the query language
1671
+ * is best used for human entered text rather than program generated text.
1672
+ *
1673
+ * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported
1674
+ * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello'
1675
+ * or 'world', though those that contain both will rank higher in the results.
1676
+ *
1677
+ * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
1678
+ * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
1679
+ * wildcards will increase the number of documents that will be found but can also have a negative
1680
+ * impact on query performance, especially with wildcards at the beginning of a term.
1681
+ *
1682
+ * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
1683
+ * hello in the title field will match this query. Using a field not present in the index will lead
1684
+ * to an error being thrown.
1685
+ *
1686
+ * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term
1687
+ * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
1688
+ * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2.
1689
+ * Avoid large values for edit distance to improve query performance.
1690
+ *
1691
+ * To escape special characters the backslash character '\' can be used, this allows searches to include
1692
+ * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
1693
+ * of attempting to apply a boost of 2 to the search term "foo".
1694
+ *
1695
+ * @typedef {string} lunr.Index~QueryString
1696
+ * @example <caption>Simple single term query</caption>
1697
+ * hello
1698
+ * @example <caption>Multiple term query</caption>
1699
+ * hello world
1700
+ * @example <caption>term scoped to a field</caption>
1701
+ * title:hello
1702
+ * @example <caption>term with a boost of 10</caption>
1703
+ * hello^10
1704
+ * @example <caption>term with an edit distance of 2</caption>
1705
+ * hello~2
1706
+ */
1707
+
1708
/**
 * Performs a search against the index using lunr query syntax.
 *
 * Results will be returned sorted by their score, the most relevant results
 * will be returned first.
 *
 * For more programmatic querying use lunr.Index#query.
 *
 * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
 * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
 * @returns {lunr.Index~Result[]}
 */
lunr.Index.prototype.search = function (queryString) {
  // Delegate to #query, parsing the string into the supplied query object.
  return this.query(function (query) {
    new lunr.QueryParser(queryString, query).parse()
  })
}
1726
+
1727
+ /**
1728
+ * A query builder callback provides a query object to be used to express
1729
+ * the query to perform on the index.
1730
+ *
1731
+ * @callback lunr.Index~queryBuilder
1732
+ * @param {lunr.Query} query - The query object to build up.
1733
+ * @this lunr.Query
1734
+ */
1735
+
1736
/**
 * Performs a query against the index using the yielded lunr.Query object.
 *
 * If performing programmatic queries against the index, this method is preferred
 * over lunr.Index#search so as to avoid the additional query parsing overhead.
 *
 * A query object is yielded to the supplied function which should be used to
 * express the query to be run against the index.
 *
 * Note that although this function takes a callback parameter it is _not_ an
 * asynchronous operation, the callback is just yielded a query object to be
 * customized.
 *
 * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
 * @returns {lunr.Index~Result[]} Results sorted by descending score.
 */
lunr.Index.prototype.query = function (fn) {
  // for each query clause
  // * process terms
  // * expand terms from token set
  // * find matching documents and metadata
  // * get document vectors
  // * score documents

  var query = new lunr.Query(this.fields),
      matchingFields = Object.create(null),
      queryVectors = Object.create(null)

  // yield the query object so the caller can populate its clauses
  fn.call(query, query)

  for (var i = 0; i < query.clauses.length; i++) {
    /*
     * Unless the pipeline has been disabled for this term, which is
     * the case for terms with wildcards, we need to pass the clause
     * term through the search pipeline. A pipeline returns an array
     * of processed terms. Pipeline functions may expand the passed
     * term, which means we may end up performing multiple index lookups
     * for a single query term.
     */
    var clause = query.clauses[i],
        terms = null

    if (clause.usePipeline) {
      terms = this.pipeline.runString(clause.term)
    } else {
      terms = [clause.term]
    }

    for (var m = 0; m < terms.length; m++) {
      var term = terms[m]

      /*
       * Each term returned from the pipeline needs to use the same query
       * clause object, e.g. the same boost and or edit distance. The
       * simplest way to do this is to re-use the clause object but mutate
       * its term property.
       */
      clause.term = term

      /*
       * From the term in the clause we create a token set which will then
       * be used to intersect the indexes token set to get a list of terms
       * to lookup in the inverted index
       */
      var termTokenSet = lunr.TokenSet.fromClause(clause),
          expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()

      for (var j = 0; j < expandedTerms.length; j++) {
        /*
         * For each term get the posting and termIndex, this is required for
         * building the query vector.
         */
        var expandedTerm = expandedTerms[j],
            posting = this.invertedIndex[expandedTerm],
            termIndex = posting._index

        for (var k = 0; k < clause.fields.length; k++) {
          /*
           * For each field that this query term is scoped by (by default
           * all fields are in scope) we need to get all the document refs
           * that have this term in that field.
           *
           * The posting is the entry in the invertedIndex for the matching
           * term from above.
           */
          var field = clause.fields[k],
              fieldPosting = posting[field],
              matchingDocumentRefs = Object.keys(fieldPosting)

          /*
           * To support field level boosts a query vector is created per
           * field. This vector is populated using the termIndex found for
           * the term and a unit value with the appropriate boost applied.
           *
           * If the query vector for this field does not exist yet it needs
           * to be created.
           */
          if (!(field in queryVectors)) {
            queryVectors[field] = new lunr.Vector
          }

          /*
           * Using upsert because there could already be an entry in the vector
           * for the term we are working with. In that case we just add the scores
           * together.
           */
          queryVectors[field].upsert(termIndex, 1 * clause.boost, function (a, b) { return a + b })

          for (var l = 0; l < matchingDocumentRefs.length; l++) {
            /*
             * All metadata for this term/field/document triple
             * are then extracted and collected into an instance
             * of lunr.MatchData ready to be returned in the query
             * results
             */
            var matchingDocumentRef = matchingDocumentRefs[l],
                matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),
                documentMetadata, matchData

            documentMetadata = fieldPosting[matchingDocumentRef]
            matchData = new lunr.MatchData (expandedTerm, field, documentMetadata)

            // FieldRef is coerced to its string form when used as a key here
            if (matchingFieldRef in matchingFields) {
              matchingFields[matchingFieldRef].combine(matchData)
            } else {
              matchingFields[matchingFieldRef] = matchData
            }

          }
        }
      }
    }
  }

  var matchingFieldRefs = Object.keys(matchingFields),
      results = {}

  for (var i = 0; i < matchingFieldRefs.length; i++) {
    /*
     * Currently we have document fields that match the query, but we
     * need to return documents. The matchData and scores are combined
     * from multiple fields belonging to the same document.
     *
     * Scores are calculated by field, using the query vectors created
     * above, and combined into a final document score using addition.
     */
    var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
        docRef = fieldRef.docRef,
        fieldVector = this.fieldVectors[fieldRef],
        score = queryVectors[fieldRef.fieldName].similarity(fieldVector)

    if (docRef in results) {
      results[docRef].score += score
      results[docRef].matchData.combine(matchingFields[fieldRef])
    } else {
      results[docRef] = {
        ref: docRef,
        score: score,
        matchData: matchingFields[fieldRef]
      }
    }
  }

  /*
   * The results object needs to be converted into a list
   * of results, sorted by score before being returned.
   */
  return Object.keys(results)
    .map(function (key) {
      return results[key]
    })
    .sort(function (a, b) {
      return b.score - a.score
    })
}
1911
+
1912
/**
 * Prepares the index for JSON serialization.
 *
 * The schema for this JSON blob will be described in a
 * separate JSON schema file.
 *
 * @returns {Object}
 */
lunr.Index.prototype.toJSON = function () {
  var self = this

  // serialize the inverted index as sorted [term, posting] pairs so the
  // output is deterministic
  var invertedIndex = Object.keys(this.invertedIndex)
    .sort()
    .map(function (term) {
      return [term, self.invertedIndex[term]]
    })

  // serialize each field vector as a [fieldRef, elements] pair
  var fieldVectors = Object.keys(this.fieldVectors)
    .map(function (ref) {
      return [ref, self.fieldVectors[ref].toJSON()]
    })

  return {
    version: lunr.version,
    fields: this.fields,
    fieldVectors: fieldVectors,
    invertedIndex: invertedIndex,
    pipeline: this.pipeline.toJSON()
  }
}
1940
+
1941
/**
 * Loads a previously serialized lunr.Index
 *
 * @param {Object} serializedIndex - A previously serialized lunr.Index
 * @returns {lunr.Index}
 */
lunr.Index.load = function (serializedIndex) {
  var pipeline = lunr.Pipeline.load(serializedIndex.pipeline),
      tokenSetBuilder = new lunr.TokenSet.Builder,
      fieldVectors = {},
      invertedIndex = {}

  // a version mismatch is not fatal, but warn so unexpected behaviour
  // can be traced back to it
  if (serializedIndex.version != lunr.version) {
    lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
  }

  // rebuild each field vector from its serialized elements
  var serializedVectors = serializedIndex.fieldVectors
  for (var i = 0; i < serializedVectors.length; i++) {
    var vectorPair = serializedVectors[i]
    fieldVectors[vectorPair[0]] = new lunr.Vector(vectorPair[1])
  }

  // rebuild the inverted index, feeding each term into the token set
  // builder as we go (toJSON serializes terms in sorted order)
  var serializedInvertedIndex = serializedIndex.invertedIndex
  for (var j = 0; j < serializedInvertedIndex.length; j++) {
    var indexPair = serializedInvertedIndex[j]
    tokenSetBuilder.insert(indexPair[0])
    invertedIndex[indexPair[0]] = indexPair[1]
  }

  tokenSetBuilder.finish()

  return new lunr.Index({
    fields: serializedIndex.fields,
    fieldVectors: fieldVectors,
    invertedIndex: invertedIndex,
    tokenSet: tokenSetBuilder.root,
    pipeline: pipeline
  })
}
1988
+ /*!
1989
+ * lunr.Builder
1990
+ * Copyright (C) 2017 Oliver Nightingale
1991
+ */
1992
+
1993
/**
 * lunr.Builder performs indexing on a set of documents and
 * returns instances of lunr.Index ready for querying.
 *
 * All configuration of the index is done via the builder: the
 * fields to index, the document reference, the text processing
 * pipeline and document scoring parameters are all set on the
 * builder before indexing.
 *
 * @constructor
 * @property {string} _ref - Internal reference to the document reference field.
 * @property {string[]} _fields - Internal reference to the document fields to index.
 * @property {object} invertedIndex - The inverted index maps terms to document fields.
 * @property {object} fieldTermFrequencies - Keeps track of term frequencies per document field.
 * @property {object} fieldLengths - Keeps track of the length of fields added to the index.
 * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
 * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
 * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
 * @property {number} documentCount - Keeps track of the total number of documents indexed.
 * @property {number} _b - A parameter to control field length normalization, 0 disables normalization, 1 fully normalizes field lengths, the default value is 0.75.
 * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.
 * @property {number} termIndex - A counter incremented for each unique term, used to identify a term's position in the vector space.
 * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
 */
lunr.Builder = function () {
  // document identity and indexed fields
  this._ref = "id"
  this._fields = []

  // per-field index data accumulated while documents are added
  this.invertedIndex = Object.create(null)
  this.fieldTermFrequencies = {}
  this.fieldLengths = {}
  this.documentCount = 0
  this.termIndex = 0

  // text processing
  this.tokenizer = lunr.tokenizer
  this.pipeline = new lunr.Pipeline
  this.searchPipeline = new lunr.Pipeline
  this.metadataWhitelist = []

  // BM25 tuning parameters
  this._b = 0.75
  this._k1 = 1.2
}
2032
+
2033
/**
 * Sets the document field used as the document reference. Every document must have this field.
 * The type of this field in the document should be a string, if it is not a string it will be
 * coerced into a string by calling toString.
 *
 * The default ref is 'id'.
 *
 * The ref should _not_ be changed during indexing, it should be set before any documents are
 * added to the index. Changing it during indexing can lead to inconsistent results.
 *
 * @param {string} ref - The name of the reference field in the document.
 */
lunr.Builder.prototype.ref = function (documentRef) {
  this._ref = documentRef
}
2048
+
2049
/**
 * Adds a field to the list of document fields that will be indexed. Every document being
 * indexed should have this field. Null values for this field in indexed documents will
 * not cause errors but will limit the chance of that document being retrieved by searches.
 *
 * All fields should be added before adding documents to the index. Adding fields after
 * a document has been indexed will have no effect on already indexed documents.
 *
 * @param {string} field - The name of a field to index in all documents.
 */
lunr.Builder.prototype.field = function (fieldName) {
  this._fields.push(fieldName)
}
2062
+
2063
/**
 * A parameter to tune the amount of field length normalisation that is applied when
 * calculating relevance scores. A value of 0 will completely disable any normalisation
 * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
 * will be clamped to the range 0 - 1.
 *
 * @param {number} number - The value to set for this tuning parameter.
 */
lunr.Builder.prototype.b = function (number) {
  // clamp to [0, 1]; NaN propagates through unchanged, as before
  this._b = Math.min(Math.max(number, 0), 1)
}
2080
+
2081
/**
 * A parameter that controls the speed at which a rise in term frequency results in term
 * frequency saturation. The default value is 1.2. Setting this to a higher value will give
 * slower saturation levels, a lower value will result in quicker saturation.
 *
 * @param {number} number - The value to set for this tuning parameter.
 */
lunr.Builder.prototype.k1 = function (value) {
  this._k1 = value
}
2091
+
2092
/**
 * Adds a document to the index.
 *
 * Before adding fields to the index the index should have been fully setup, with the document
 * ref and all fields to index already having been specified.
 *
 * The document must have a field name as specified by the ref (by default this is 'id') and
 * it should have all fields defined for indexing, though null or undefined values will not
 * cause errors.
 *
 * @param {object} doc - The document to add to the index.
 */
lunr.Builder.prototype.add = function (doc) {
  var docRef = doc[this._ref]

  this.documentCount += 1

  for (var i = 0; i < this._fields.length; i++) {
    var fieldName = this._fields[i],
        field = doc[fieldName],
        tokens = this.tokenizer(field),
        terms = this.pipeline.run(tokens),
        fieldRef = new lunr.FieldRef (docRef, fieldName),
        fieldTerms = Object.create(null)

    // fieldRef is coerced to its string form when used as a key below
    this.fieldTermFrequencies[fieldRef] = fieldTerms
    this.fieldLengths[fieldRef] = 0

    // store the length of this field for this document
    this.fieldLengths[fieldRef] += terms.length

    // calculate term frequencies for this field
    for (var j = 0; j < terms.length; j++) {
      var term = terms[j]

      if (fieldTerms[term] == undefined) {
        fieldTerms[term] = 0
      }

      fieldTerms[term] += 1

      // add to inverted index
      // create an initial posting if one doesn't exist
      if (this.invertedIndex[term] == undefined) {
        var posting = Object.create(null)
        // _index identifies this term's dimension in the vector space
        posting["_index"] = this.termIndex
        this.termIndex += 1

        for (var k = 0; k < this._fields.length; k++) {
          posting[this._fields[k]] = Object.create(null)
        }

        this.invertedIndex[term] = posting
      }

      // add an entry for this term/fieldName/docRef to the invertedIndex
      if (this.invertedIndex[term][fieldName][docRef] == undefined) {
        this.invertedIndex[term][fieldName][docRef] = Object.create(null)
      }

      // store all whitelisted metadata about this token in the
      // inverted index
      for (var l = 0; l < this.metadataWhitelist.length; l++) {
        var metadataKey = this.metadataWhitelist[l],
            metadata = term.metadata[metadataKey]

        if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {
          this.invertedIndex[term][fieldName][docRef][metadataKey] = []
        }

        this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)
      }
    }

  }
}
2168
+
2169
/**
 * Calculates the average document length for this index
 *
 * @private
 */
lunr.Builder.prototype.calculateAverageFieldLengths = function () {
  var totals = {},
      documentsWithField = {},
      fieldRefs = Object.keys(this.fieldLengths)

  // sum field lengths and count documents per field name
  for (var i = 0; i < fieldRefs.length; i++) {
    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
        fieldName = fieldRef.fieldName

    documentsWithField[fieldName] = (documentsWithField[fieldName] || 0) + 1
    totals[fieldName] = (totals[fieldName] || 0) + this.fieldLengths[fieldRef]
  }

  // convert each total into an average
  for (var j = 0; j < this._fields.length; j++) {
    var field = this._fields[j]
    totals[field] = totals[field] / documentsWithField[field]
  }

  this.averageFieldLength = totals
}
2199
+
2200
/**
 * Builds a vector space model of every document using lunr.Vector
 *
 * @private
 */
lunr.Builder.prototype.createFieldVectors = function () {
  var fieldVectors = {},
      fieldRefs = Object.keys(this.fieldTermFrequencies),
      fieldRefsLength = fieldRefs.length

  for (var i = 0; i < fieldRefsLength; i++) {
    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
        field = fieldRef.fieldName,
        fieldLength = this.fieldLengths[fieldRef],
        fieldVector = new lunr.Vector,
        termFrequencies = this.fieldTermFrequencies[fieldRef],
        terms = Object.keys(termFrequencies),
        termsLength = terms.length

    for (var j = 0; j < termsLength; j++) {
      var term = terms[j],
          tf = termFrequencies[term],
          termIndex = this.invertedIndex[term]._index,
          idf = lunr.idf(this.invertedIndex[term], this.documentCount),
          // term score: idf weighted by saturated term frequency (_k1) and
          // field length normalisation (_b)
          score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[field])) + tf),
          scoreWithPrecision = Math.round(score * 1000) / 1000
          // Converts 1.23456789 to 1.234.
          // Reducing the precision so that the vectors take up less
          // space when serialised. Doing it now so that they behave
          // the same before and after serialisation. Also, this is
          // the fastest approach to reducing a number's precision in
          // JavaScript.

      fieldVector.insert(termIndex, scoreWithPrecision)
    }

    fieldVectors[fieldRef] = fieldVector
  }

  this.fieldVectors = fieldVectors
}
2241
+
2242
/**
 * Creates a token set of all tokens in the index using lunr.TokenSet
 *
 * @private
 */
lunr.Builder.prototype.createTokenSet = function () {
  // fromArray requires its input to be sorted
  var sortedTerms = Object.keys(this.invertedIndex).sort()

  this.tokenSet = lunr.TokenSet.fromArray(sortedTerms)
}
2252
+
2253
/**
 * Builds the index, creating an instance of lunr.Index.
 *
 * This completes the indexing process and should only be called
 * once all documents have been added to the index.
 *
 * @returns {lunr.Index}
 */
lunr.Builder.prototype.build = function () {
  this.calculateAverageFieldLengths()
  this.createFieldVectors()
  this.createTokenSet()

  var attrs = {
    invertedIndex: this.invertedIndex,
    fieldVectors: this.fieldVectors,
    tokenSet: this.tokenSet,
    fields: this._fields,
    pipeline: this.searchPipeline
  }

  return new lunr.Index(attrs)
}
2274
+
2275
/**
 * Applies a plugin to the index builder.
 *
 * A plugin is just a function that encapsulates custom behaviour to be
 * applied when building the index. It is invoked with the builder as
 * both its context (`this`) and its first argument; any additional
 * arguments passed to `use` are forwarded to the plugin after the
 * builder.
 *
 * @param {Function} fn - The plugin to apply.
 */
lunr.Builder.prototype.use = function (fn) {
  var pluginArgs = [this].concat(Array.prototype.slice.call(arguments, 1))

  fn.apply(this, pluginArgs)
}
2294
/**
 * Contains and collects metadata about a matching document.
 * A single instance of lunr.MatchData is returned as part of every
 * lunr.Index~Result.
 *
 * @constructor
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 * @property {object} metadata - A cloned collection of metadata associated with this document.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData = function (term, field, metadata) {
  // Clone the metadata so the original is not mutated when match data
  // instances are combined. Metadata values are arrays in the inverted
  // index, so a shallow Array#slice per key is sufficient.
  var clonedMetadata = Object.create(null)

  Object.keys(metadata).forEach(function (key) {
    clonedMetadata[key] = metadata[key].slice()
  })

  this.metadata = Object.create(null)
  this.metadata[term] = Object.create(null)
  this.metadata[term][field] = clonedMetadata
}
2324
+
2325
/**
 * An instance of lunr.MatchData will be created for every term that matches a
 * document. However only one instance is required in a lunr.Index~Result. This
 * method combines metadata from another instance of lunr.MatchData with this
 * objects metadata.
 *
 * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData.prototype.combine = function (otherMatchData) {
  var self = this

  Object.keys(otherMatchData.metadata).forEach(function (term) {
    if (self.metadata[term] == undefined) {
      self.metadata[term] = Object.create(null)
    }

    Object.keys(otherMatchData.metadata[term]).forEach(function (field) {
      if (self.metadata[term][field] == undefined) {
        self.metadata[term][field] = Object.create(null)
      }

      Object.keys(otherMatchData.metadata[term][field]).forEach(function (key) {
        var existing = self.metadata[term][field][key],
            incoming = otherMatchData.metadata[term][field][key]

        // adopt the incoming metadata array, or concatenate onto ours
        self.metadata[term][field][key] = existing == undefined ? incoming : existing.concat(incoming)
      })
    })
  })
}
2366
/**
 * A lunr.Query provides a programmatic way of defining queries to be performed
 * against a {@link lunr.Index}.
 *
 * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
 * so the query object is pre-initialized with the right index fields.
 *
 * @constructor
 * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
 * @property {string[]} allFields - An array of all available fields in a lunr.Index.
 */
lunr.Query = function (allFields) {
  this.allFields = allFields
  this.clauses = []
}
2381
+
2382
/**
 * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
 *
 * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
 * concatenation.
 *
 * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
 *
 * @constant
 * @default
 * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
 * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
 * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
 * @see lunr.Query~Clause
 * @see lunr.Query#clause
 * @see lunr.Query#term
 * @example <caption>query term with trailing wildcard</caption>
 * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
 * @example <caption>query term with leading and trailing wildcard</caption>
 * query.term('foo', {
 *   wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
 * })
 */
// A String *object* so the flag constants below can hang off the wildcard
// character itself; loose equality against "*" still holds.
lunr.Query.wildcard = new String ("*")
lunr.Query.wildcard.NONE = 0
lunr.Query.wildcard.LEADING = 1
lunr.Query.wildcard.TRAILING = 2
2409
+
2410
+ /**
2411
+ * A single clause in a {@link lunr.Query} contains a term and details on how to
2412
+ * match that term against a {@link lunr.Index}.
2413
+ *
2414
+ * @typedef {Object} lunr.Query~Clause
2415
+ * @property {string[]} fields - The fields in an index this clause should be matched against.
2416
+ * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
2417
+ * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
2418
+ * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
2419
+ * @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended.
2420
+ */
2421
+
2422
/**
 * Adds a {@link lunr.Query~Clause} to this query.
 *
 * Unless the clause contains the fields to be matched all fields will be matched. In addition
 * a default boost of 1 is applied to the clause.
 *
 * @param {lunr.Query~Clause} clause - The clause to add to this query.
 * @see lunr.Query~Clause
 * @returns {lunr.Query}
 */
lunr.Query.prototype.clause = function (clause) {
  // fill in defaults for any options the caller omitted
  if (!('fields' in clause)) {
    clause.fields = this.allFields
  }

  if (!('boost' in clause)) {
    clause.boost = 1
  }

  if (!('usePipeline' in clause)) {
    clause.usePipeline = true
  }

  if (!('wildcard' in clause)) {
    clause.wildcard = lunr.Query.wildcard.NONE
  }

  // apply requested automatic wildcards, unless already present
  var wantsLeading = (clause.wildcard & lunr.Query.wildcard.LEADING) != 0,
      wantsTrailing = (clause.wildcard & lunr.Query.wildcard.TRAILING) != 0

  if (wantsLeading && (clause.term.charAt(0) != lunr.Query.wildcard)) {
    clause.term = "*" + clause.term
  }

  if (wantsTrailing && (clause.term.slice(-1) != lunr.Query.wildcard)) {
    clause.term = "" + clause.term + "*"
  }

  this.clauses.push(clause)

  return this
}
2461
+
2462
/**
 * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause}
 * to the list of clauses that make up this query.
 *
 * @param {string} term - The term to add to the query.
 * @param {Object} [options] - Any additional properties to add to the query clause.
 * @returns {lunr.Query}
 * @see lunr.Query#clause
 * @see lunr.Query~Clause
 * @example <caption>adding a single term to a query</caption>
 * query.term("foo")
 * @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption>
 * query.term("foo", {
 *   fields: ["title"],
 *   boost: 10,
 *   wildcard: lunr.Query.wildcard.TRAILING
 * })
 */
lunr.Query.prototype.term = function (term, options) {
  // the options object doubles as the clause itself
  var clause = options || {}
  clause.term = term

  // clause() returns this query, so the call can be returned directly
  return this.clause(clause)
}
2488
/**
 * An error raised when a lunr query string cannot be parsed.
 *
 * @constructor
 * @param {string} message - A description of the parse failure.
 * @param {number} start - Position in the query string where the problem starts.
 * @param {number} end - Position in the query string where the problem ends.
 */
lunr.QueryParseError = function (message, start, end) {
  this.name = "QueryParseError"
  this.message = message
  this.start = start
  this.end = end
}

lunr.QueryParseError.prototype = new Error
2496
/**
 * A lexer that splits a lunr query string into lexemes for the query parser.
 *
 * @constructor
 * @param {string} str - The query string to lex.
 */
lunr.QueryLexer = function (str) {
  this.lexemes = []             // lexemes emitted so far
  this.str = str                // the raw query string
  this.length = str.length
  this.pos = 0                  // current read position in str
  this.start = 0                // start of the lexeme currently being read
  this.escapeCharPositions = [] // escape char positions to strip when slicing
}
2504
+
2505
/**
 * Runs the lexer over the whole query string. Each state function
 * returns the next state function, or nothing to stop lexing.
 */
lunr.QueryLexer.prototype.run = function () {
  for (var state = lunr.QueryLexer.lexText; state != null; state = state(this)) {
    // state functions do all the work and hand back the next state
  }
}

/**
 * Returns the text of the current lexeme (start..pos), with any
 * recorded escape characters removed.
 */
lunr.QueryLexer.prototype.sliceString = function () {
  var pieces = [],
      from = this.start

  // skip over each recorded escape character position
  for (var i = 0; i < this.escapeCharPositions.length; i++) {
    var escapePos = this.escapeCharPositions[i]
    pieces.push(this.str.slice(from, escapePos))
    from = escapePos + 1
  }

  pieces.push(this.str.slice(from, this.pos))
  this.escapeCharPositions.length = 0

  return pieces.join('')
}

/**
 * Records the current lexeme with the given type and starts a new one.
 */
lunr.QueryLexer.prototype.emit = function (type) {
  var lexeme = {
    type: type,
    str: this.sliceString(),
    start: this.start,
    end: this.pos
  }

  this.lexemes.push(lexeme)
  this.start = this.pos
}

/**
 * Marks the character just read as an escape character so sliceString
 * can strip it, and consumes the escaped character that follows.
 */
lunr.QueryLexer.prototype.escapeCharacter = function () {
  this.escapeCharPositions.push(this.pos - 1)
  this.pos = this.pos + 1
}

/**
 * Consumes and returns the next character, or EOS when the input is
 * exhausted.
 */
lunr.QueryLexer.prototype.next = function () {
  if (this.pos >= this.length) {
    return lunr.QueryLexer.EOS
  }

  return this.str.charAt(this.pos++)
}

/**
 * Returns the width of the lexeme currently being read.
 */
lunr.QueryLexer.prototype.width = function () {
  return this.pos - this.start
}

/**
 * Discards the current lexeme, consuming one character if nothing has
 * been read yet.
 */
lunr.QueryLexer.prototype.ignore = function () {
  if (this.start == this.pos) {
    this.pos += 1
  }

  this.start = this.pos
}

/**
 * Steps back one character.
 */
lunr.QueryLexer.prototype.backup = function () {
  this.pos = this.pos - 1
}

/**
 * Consumes a run of ASCII digit characters, leaving pos on the first
 * non-digit (unless the end of input was reached).
 */
lunr.QueryLexer.prototype.acceptDigitRun = function () {
  var char = this.next()

  while (char.charCodeAt(0) > 47 && char.charCodeAt(0) < 58) {
    char = this.next()
  }

  if (char != lunr.QueryLexer.EOS) {
    this.backup()
  }
}

/**
 * Returns true while there is unread input remaining.
 */
lunr.QueryLexer.prototype.more = function () {
  return this.pos < this.length
}
2588
+
2589
// Lexeme type identifiers emitted by the query lexer.
lunr.QueryLexer.EOS = 'EOS'
lunr.QueryLexer.FIELD = 'FIELD'
lunr.QueryLexer.TERM = 'TERM'
lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'
lunr.QueryLexer.BOOST = 'BOOST'
2594
+
2595
  /**
   * Lexer state: a ':' has just been read, so everything pending before
   * it is a field name. Emits a FIELD lexeme (dropping the ':') and
   * returns to lexing plain text.
   */
  lunr.QueryLexer.lexField = function (lexer) {
    lexer.backup()
    lexer.emit(lunr.QueryLexer.FIELD)
    lexer.ignore()
    return lunr.QueryLexer.lexText
  }
2601
+
2602
+ lunr.QueryLexer.lexTerm = function (lexer) {
2603
+ if (lexer.width() > 1) {
2604
+ lexer.backup()
2605
+ lexer.emit(lunr.QueryLexer.TERM)
2606
+ }
2607
+
2608
+ lexer.ignore()
2609
+
2610
+ if (lexer.more()) {
2611
+ return lunr.QueryLexer.lexText
2612
+ }
2613
+ }
2614
+
2615
  /**
   * Lexer state: a '~' has just been read. Skips the '~', consumes the
   * following digit run and emits it as an EDIT_DISTANCE lexeme, then
   * returns to lexing plain text.
   */
  lunr.QueryLexer.lexEditDistance = function (lexer) {
    lexer.ignore()
    lexer.acceptDigitRun()
    lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
    return lunr.QueryLexer.lexText
  }
2621
+
2622
  /**
   * Lexer state: a '^' has just been read. Skips the '^', consumes the
   * following digit run and emits it as a BOOST lexeme, then returns to
   * lexing plain text.
   */
  lunr.QueryLexer.lexBoost = function (lexer) {
    lexer.ignore()
    lexer.acceptDigitRun()
    lexer.emit(lunr.QueryLexer.BOOST)
    return lunr.QueryLexer.lexText
  }
2628
+
2629
  /**
   * Lexer state: end of input. Emits any still-pending text as a final
   * TERM lexeme. Returns nothing, which terminates the state machine.
   */
  lunr.QueryLexer.lexEOS = function (lexer) {
    if (lexer.width() > 0) {
      lexer.emit(lunr.QueryLexer.TERM)
    }
  }
2634
+
2635
  // This matches the separator used when tokenising fields within a
  // document; the two must agree, otherwise some tokens that appear in
  // a document would be impossible to search for.
  //
  // The separator on the tokenizer is user-configurable, so it _might_
  // clash with one of the special characters the query syntax already
  // uses, e.g. ':'.
  //
  // This means that changing the separator can make some words
  // unsearchable using a search string.
  lunr.QueryLexer.termSeparator = lunr.tokenizer.separator
2647
+
2648
  /**
   * Main lexer state: scans characters one at a time and dispatches to
   * the state that handles whatever construct each special character
   * introduces (':' field, '~' edit distance, '^' boost, separator ->
   * term end). Returns the next state function, or lexEOS at the end of
   * the input.
   */
  lunr.QueryLexer.lexText = function (lexer) {
    while (true) {
      var char = lexer.next()

      if (char == lunr.QueryLexer.EOS) {
        return lunr.QueryLexer.lexEOS
      }

      // Escape character is '\'
      if (char.charCodeAt(0) == 92) {
        lexer.escapeCharacter()
        continue
      }

      if (char == ":") {
        return lunr.QueryLexer.lexField
      }

      // '~' and '^' both terminate any pending term before switching
      // state, so "foo~2" emits TERM("foo") then EDIT_DISTANCE("2").
      if (char == "~") {
        lexer.backup()
        if (lexer.width() > 0) {
          lexer.emit(lunr.QueryLexer.TERM)
        }
        return lunr.QueryLexer.lexEditDistance
      }

      if (char == "^") {
        lexer.backup()
        if (lexer.width() > 0) {
          lexer.emit(lunr.QueryLexer.TERM)
        }
        return lunr.QueryLexer.lexBoost
      }

      if (char.match(lunr.QueryLexer.termSeparator)) {
        return lunr.QueryLexer.lexTerm
      }
    }
  }
2687
+
2688
+ lunr.QueryParser = function (str, query) {
2689
+ this.lexer = new lunr.QueryLexer (str)
2690
+ this.query = query
2691
+ this.currentClause = {}
2692
+ this.lexemeIdx = 0
2693
+ }
2694
+
2695
+ lunr.QueryParser.prototype.parse = function () {
2696
+ this.lexer.run()
2697
+ this.lexemes = this.lexer.lexemes
2698
+
2699
+ var state = lunr.QueryParser.parseFieldOrTerm
2700
+
2701
+ while (state) {
2702
+ state = state(this)
2703
+ }
2704
+
2705
+ return this.query
2706
+ }
2707
+
2708
+ lunr.QueryParser.prototype.peekLexeme = function () {
2709
+ return this.lexemes[this.lexemeIdx]
2710
+ }
2711
+
2712
+ lunr.QueryParser.prototype.consumeLexeme = function () {
2713
+ var lexeme = this.peekLexeme()
2714
+ this.lexemeIdx += 1
2715
+ return lexeme
2716
+ }
2717
+
2718
+ lunr.QueryParser.prototype.nextClause = function () {
2719
+ var completedClause = this.currentClause
2720
+ this.query.clause(completedClause)
2721
+ this.currentClause = {}
2722
+ }
2723
+
2724
+ lunr.QueryParser.parseFieldOrTerm = function (parser) {
2725
+ var lexeme = parser.peekLexeme()
2726
+
2727
+ if (lexeme == undefined) {
2728
+ return
2729
+ }
2730
+
2731
+ switch (lexeme.type) {
2732
+ case lunr.QueryLexer.FIELD:
2733
+ return lunr.QueryParser.parseField
2734
+ case lunr.QueryLexer.TERM:
2735
+ return lunr.QueryParser.parseTerm
2736
+ default:
2737
+ var errorMessage = "expected either a field or a term, found " + lexeme.type
2738
+
2739
+ if (lexeme.str.length >= 1) {
2740
+ errorMessage += " with value '" + lexeme.str + "'"
2741
+ }
2742
+
2743
+ throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
2744
+ }
2745
+ }
2746
+
2747
+ lunr.QueryParser.parseField = function (parser) {
2748
+ var lexeme = parser.consumeLexeme()
2749
+
2750
+ if (lexeme == undefined) {
2751
+ return
2752
+ }
2753
+
2754
+ if (parser.query.allFields.indexOf(lexeme.str) == -1) {
2755
+ var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
2756
+ errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields
2757
+
2758
+ throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
2759
+ }
2760
+
2761
+ parser.currentClause.fields = [lexeme.str]
2762
+
2763
+ var nextLexeme = parser.peekLexeme()
2764
+
2765
+ if (nextLexeme == undefined) {
2766
+ var errorMessage = "expecting term, found nothing"
2767
+ throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
2768
+ }
2769
+
2770
+ switch (nextLexeme.type) {
2771
+ case lunr.QueryLexer.TERM:
2772
+ return lunr.QueryParser.parseTerm
2773
+ default:
2774
+ var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
2775
+ throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
2776
+ }
2777
+ }
2778
+
2779
+ lunr.QueryParser.parseTerm = function (parser) {
2780
+ var lexeme = parser.consumeLexeme()
2781
+
2782
+ if (lexeme == undefined) {
2783
+ return
2784
+ }
2785
+
2786
+ parser.currentClause.term = lexeme.str.toLowerCase()
2787
+
2788
+ if (lexeme.str.indexOf("*") != -1) {
2789
+ parser.currentClause.usePipeline = false
2790
+ }
2791
+
2792
+ var nextLexeme = parser.peekLexeme()
2793
+
2794
+ if (nextLexeme == undefined) {
2795
+ parser.nextClause()
2796
+ return
2797
+ }
2798
+
2799
+ switch (nextLexeme.type) {
2800
+ case lunr.QueryLexer.TERM:
2801
+ parser.nextClause()
2802
+ return lunr.QueryParser.parseTerm
2803
+ case lunr.QueryLexer.FIELD:
2804
+ parser.nextClause()
2805
+ return lunr.QueryParser.parseField
2806
+ case lunr.QueryLexer.EDIT_DISTANCE:
2807
+ return lunr.QueryParser.parseEditDistance
2808
+ case lunr.QueryLexer.BOOST:
2809
+ return lunr.QueryParser.parseBoost
2810
+ default:
2811
+ var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
2812
+ throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
2813
+ }
2814
+ }
2815
+
2816
+ lunr.QueryParser.parseEditDistance = function (parser) {
2817
+ var lexeme = parser.consumeLexeme()
2818
+
2819
+ if (lexeme == undefined) {
2820
+ return
2821
+ }
2822
+
2823
+ var editDistance = parseInt(lexeme.str, 10)
2824
+
2825
+ if (isNaN(editDistance)) {
2826
+ var errorMessage = "edit distance must be numeric"
2827
+ throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
2828
+ }
2829
+
2830
+ parser.currentClause.editDistance = editDistance
2831
+
2832
+ var nextLexeme = parser.peekLexeme()
2833
+
2834
+ if (nextLexeme == undefined) {
2835
+ parser.nextClause()
2836
+ return
2837
+ }
2838
+
2839
+ switch (nextLexeme.type) {
2840
+ case lunr.QueryLexer.TERM:
2841
+ parser.nextClause()
2842
+ return lunr.QueryParser.parseTerm
2843
+ case lunr.QueryLexer.FIELD:
2844
+ parser.nextClause()
2845
+ return lunr.QueryParser.parseField
2846
+ case lunr.QueryLexer.EDIT_DISTANCE:
2847
+ return lunr.QueryParser.parseEditDistance
2848
+ case lunr.QueryLexer.BOOST:
2849
+ return lunr.QueryParser.parseBoost
2850
+ default:
2851
+ var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
2852
+ throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
2853
+ }
2854
+ }
2855
+
2856
+ lunr.QueryParser.parseBoost = function (parser) {
2857
+ var lexeme = parser.consumeLexeme()
2858
+
2859
+ if (lexeme == undefined) {
2860
+ return
2861
+ }
2862
+
2863
+ var boost = parseInt(lexeme.str, 10)
2864
+
2865
+ if (isNaN(boost)) {
2866
+ var errorMessage = "boost must be numeric"
2867
+ throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
2868
+ }
2869
+
2870
+ parser.currentClause.boost = boost
2871
+
2872
+ var nextLexeme = parser.peekLexeme()
2873
+
2874
+ if (nextLexeme == undefined) {
2875
+ parser.nextClause()
2876
+ return
2877
+ }
2878
+
2879
+ switch (nextLexeme.type) {
2880
+ case lunr.QueryLexer.TERM:
2881
+ parser.nextClause()
2882
+ return lunr.QueryParser.parseTerm
2883
+ case lunr.QueryLexer.FIELD:
2884
+ parser.nextClause()
2885
+ return lunr.QueryParser.parseField
2886
+ case lunr.QueryLexer.EDIT_DISTANCE:
2887
+ return lunr.QueryParser.parseEditDistance
2888
+ case lunr.QueryLexer.BOOST:
2889
+ return lunr.QueryParser.parseBoost
2890
+ default:
2891
+ var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
2892
+ throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
2893
+ }
2894
+ }
2895
+
2896
  /**
   * export the module via AMD, CommonJS or as a browser global
   * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
   */
  ;(function (root, factory) {
    if (typeof define === 'function' && define.amd) {
      // AMD. Register as an anonymous module.
      define(factory)
    } else if (typeof exports === 'object') {
      /**
       * Node. Does not work with strict CommonJS, but
       * only CommonJS-like environments that support module.exports,
       * like Node.
       */
      module.exports = factory()
    } else {
      // Browser globals (root is window)
      root.lunr = factory()
    }
  }(this, function () {
    /**
     * Just return a value to define the module export.
     * This example returns an object, but the module
     * can return a function as the exported value.
     */
    return lunr
  }))
2923
+ })();