Proyectos de Subversion Moodle — Rev 1 — Autor: efrain

/**
 * moodle readme
 *
 * Lunrjs can be downloaded from https://github.com/olivernn/lunr.js. To update this library get the lunr.js file
 * from this project and replace the content below with the new content.
 */

/**
 * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9
 * Copyright (C) 2020 Oliver Nightingale
 * @license MIT
 */
;(function(){
15
 
16
/**
17
 * A convenience function for configuring and constructing
18
 * a new lunr Index.
19
 *
20
 * A lunr.Builder instance is created and the pipeline setup
21
 * with a trimmer, stop word filter and stemmer.
22
 *
23
 * This builder object is yielded to the configuration function
24
 * that is passed as a parameter, allowing the list of fields
25
 * and other builder parameters to be customised.
26
 *
27
 * All documents _must_ be added within the passed config function.
28
 *
29
 * @example
30
 * var idx = lunr(function () {
31
 *   this.field('title')
32
 *   this.field('body')
33
 *   this.ref('id')
34
 *
35
 *   documents.forEach(function (doc) {
36
 *     this.add(doc)
37
 *   }, this)
38
 * })
39
 *
40
 * @see {@link lunr.Builder}
41
 * @see {@link lunr.Pipeline}
42
 * @see {@link lunr.trimmer}
43
 * @see {@link lunr.stopWordFilter}
44
 * @see {@link lunr.stemmer}
45
 * @namespace {function} lunr
46
 */
47
var lunr = function (config) {
48
  var builder = new lunr.Builder
49
 
50
  builder.pipeline.add(
51
    lunr.trimmer,
52
    lunr.stopWordFilter,
53
    lunr.stemmer
54
  )
55
 
56
  builder.searchPipeline.add(
57
    lunr.stemmer
58
  )
59
 
60
  config.call(builder, builder)
61
  return builder.build()
62
}
63
 
64
lunr.version = "2.3.9"
65
/*!
66
 * lunr.utils
67
 * Copyright (C) 2020 Oliver Nightingale
68
 */
69
 
70
/**
71
 * A namespace containing utils for the rest of the lunr library
72
 * @namespace lunr.utils
73
 */
74
lunr.utils = {}
75
 
76
/**
77
 * Print a warning message to the console.
78
 *
79
 * @param {String} message The message to be printed.
80
 * @memberOf lunr.utils
81
 * @function
82
 */
83
lunr.utils.warn = (function (global) {
84
  /* eslint-disable no-console */
85
  return function (message) {
86
    if (global.console && console.warn) {
87
      console.warn(message)
88
    }
89
  }
90
  /* eslint-enable no-console */
91
})(this)
92
 
93
/**
94
 * Convert an object to a string.
95
 *
96
 * In the case of `null` and `undefined` the function returns
97
 * the empty string, in all other cases the result of calling
98
 * `toString` on the passed object is returned.
99
 *
100
 * @param {Any} obj The object to convert to a string.
101
 * @return {String} string representation of the passed object.
102
 * @memberOf lunr.utils
103
 */
104
lunr.utils.asString = function (obj) {
105
  if (obj === void 0 || obj === null) {
106
    return ""
107
  } else {
108
    return obj.toString()
109
  }
110
}
111
 
112
/**
113
 * Clones an object.
114
 *
115
 * Will create a copy of an existing object such that any mutations
116
 * on the copy cannot affect the original.
117
 *
118
 * Only shallow objects are supported, passing a nested object to this
119
 * function will cause a TypeError.
120
 *
121
 * Objects with primitives, and arrays of primitives are supported.
122
 *
123
 * @param {Object} obj The object to clone.
124
 * @return {Object} a clone of the passed object.
125
 * @throws {TypeError} when a nested object is passed.
126
 * @memberOf Utils
127
 */
128
lunr.utils.clone = function (obj) {
129
  if (obj === null || obj === undefined) {
130
    return obj
131
  }
132
 
133
  var clone = Object.create(null),
134
      keys = Object.keys(obj)
135
 
136
  for (var i = 0; i < keys.length; i++) {
137
    var key = keys[i],
138
        val = obj[key]
139
 
140
    if (Array.isArray(val)) {
141
      clone[key] = val.slice()
142
      continue
143
    }
144
 
145
    if (typeof val === 'string' ||
146
        typeof val === 'number' ||
147
        typeof val === 'boolean') {
148
      clone[key] = val
149
      continue
150
    }
151
 
152
    throw new TypeError("clone is not deep and does not support nested objects")
153
  }
154
 
155
  return clone
156
}
157
lunr.FieldRef = function (docRef, fieldName, stringValue) {
158
  this.docRef = docRef
159
  this.fieldName = fieldName
160
  this._stringValue = stringValue
161
}
162
 
163
lunr.FieldRef.joiner = "/"
164
 
165
lunr.FieldRef.fromString = function (s) {
166
  var n = s.indexOf(lunr.FieldRef.joiner)
167
 
168
  if (n === -1) {
169
    throw "malformed field ref string"
170
  }
171
 
172
  var fieldRef = s.slice(0, n),
173
      docRef = s.slice(n + 1)
174
 
175
  return new lunr.FieldRef (docRef, fieldRef, s)
176
}
177
 
178
lunr.FieldRef.prototype.toString = function () {
179
  if (this._stringValue == undefined) {
180
    this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef
181
  }
182
 
183
  return this._stringValue
184
}
185
/*!
186
 * lunr.Set
187
 * Copyright (C) 2020 Oliver Nightingale
188
 */
189
 
190
/**
191
 * A lunr set.
192
 *
193
 * @constructor
194
 */
195
lunr.Set = function (elements) {
196
  this.elements = Object.create(null)
197
 
198
  if (elements) {
199
    this.length = elements.length
200
 
201
    for (var i = 0; i < this.length; i++) {
202
      this.elements[elements[i]] = true
203
    }
204
  } else {
205
    this.length = 0
206
  }
207
}
208
 
209
/**
210
 * A complete set that contains all elements.
211
 *
212
 * @static
213
 * @readonly
214
 * @type {lunr.Set}
215
 */
216
lunr.Set.complete = {
217
  intersect: function (other) {
218
    return other
219
  },
220
 
221
  union: function () {
222
    return this
223
  },
224
 
225
  contains: function () {
226
    return true
227
  }
228
}
229
 
230
/**
231
 * An empty set that contains no elements.
232
 *
233
 * @static
234
 * @readonly
235
 * @type {lunr.Set}
236
 */
237
lunr.Set.empty = {
238
  intersect: function () {
239
    return this
240
  },
241
 
242
  union: function (other) {
243
    return other
244
  },
245
 
246
  contains: function () {
247
    return false
248
  }
249
}
250
 
251
/**
252
 * Returns true if this set contains the specified object.
253
 *
254
 * @param {object} object - Object whose presence in this set is to be tested.
255
 * @returns {boolean} - True if this set contains the specified object.
256
 */
257
lunr.Set.prototype.contains = function (object) {
258
  return !!this.elements[object]
259
}
260
 
261
/**
262
 * Returns a new set containing only the elements that are present in both
263
 * this set and the specified set.
264
 *
265
 * @param {lunr.Set} other - set to intersect with this set.
266
 * @returns {lunr.Set} a new set that is the intersection of this and the specified set.
267
 */
268
 
269
lunr.Set.prototype.intersect = function (other) {
270
  var a, b, elements, intersection = []
271
 
272
  if (other === lunr.Set.complete) {
273
    return this
274
  }
275
 
276
  if (other === lunr.Set.empty) {
277
    return other
278
  }
279
 
280
  if (this.length < other.length) {
281
    a = this
282
    b = other
283
  } else {
284
    a = other
285
    b = this
286
  }
287
 
288
  elements = Object.keys(a.elements)
289
 
290
  for (var i = 0; i < elements.length; i++) {
291
    var element = elements[i]
292
    if (element in b.elements) {
293
      intersection.push(element)
294
    }
295
  }
296
 
297
  return new lunr.Set (intersection)
298
}
299
 
300
/**
301
 * Returns a new set combining the elements of this and the specified set.
302
 *
303
 * @param {lunr.Set} other - set to union with this set.
304
 * @return {lunr.Set} a new set that is the union of this and the specified set.
305
 */
306
 
307
lunr.Set.prototype.union = function (other) {
308
  if (other === lunr.Set.complete) {
309
    return lunr.Set.complete
310
  }
311
 
312
  if (other === lunr.Set.empty) {
313
    return this
314
  }
315
 
316
  return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))
317
}
318
/**
319
 * A function to calculate the inverse document frequency for
320
 * a posting. This is shared between the builder and the index
321
 *
322
 * @private
323
 * @param {object} posting - The posting for a given term
324
 * @param {number} documentCount - The total number of documents.
325
 */
326
lunr.idf = function (posting, documentCount) {
327
  var documentsWithTerm = 0
328
 
329
  for (var fieldName in posting) {
330
    if (fieldName == '_index') continue // Ignore the term index, its not a field
331
    documentsWithTerm += Object.keys(posting[fieldName]).length
332
  }
333
 
334
  var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)
335
 
336
  return Math.log(1 + Math.abs(x))
337
}
338
 
339
/**
340
 * A token wraps a string representation of a token
341
 * as it is passed through the text processing pipeline.
342
 *
343
 * @constructor
344
 * @param {string} [str=''] - The string token being wrapped.
345
 * @param {object} [metadata={}] - Metadata associated with this token.
346
 */
347
lunr.Token = function (str, metadata) {
348
  this.str = str || ""
349
  this.metadata = metadata || {}
350
}
351
 
352
/**
353
 * Returns the token string that is being wrapped by this object.
354
 *
355
 * @returns {string}
356
 */
357
lunr.Token.prototype.toString = function () {
358
  return this.str
359
}
360
 
361
/**
362
 * A token update function is used when updating or optionally
363
 * when cloning a token.
364
 *
365
 * @callback lunr.Token~updateFunction
366
 * @param {string} str - The string representation of the token.
367
 * @param {Object} metadata - All metadata associated with this token.
368
 */
369
 
370
/**
371
 * Applies the given function to the wrapped string token.
372
 *
373
 * @example
374
 * token.update(function (str, metadata) {
375
 *   return str.toUpperCase()
376
 * })
377
 *
378
 * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
379
 * @returns {lunr.Token}
380
 */
381
lunr.Token.prototype.update = function (fn) {
382
  this.str = fn(this.str, this.metadata)
383
  return this
384
}
385
 
386
/**
387
 * Creates a clone of this token. Optionally a function can be
388
 * applied to the cloned token.
389
 *
390
 * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
391
 * @returns {lunr.Token}
392
 */
393
lunr.Token.prototype.clone = function (fn) {
394
  fn = fn || function (s) { return s }
395
  return new lunr.Token (fn(this.str, this.metadata), this.metadata)
396
}
397
/*!
398
 * lunr.tokenizer
399
 * Copyright (C) 2020 Oliver Nightingale
400
 */
401
 
402
/**
403
 * A function for splitting a string into tokens ready to be inserted into
404
 * the search index. Uses `lunr.tokenizer.separator` to split strings, change
405
 * the value of this property to change how strings are split into tokens.
406
 *
407
 * This tokenizer will convert its parameter to a string by calling `toString` and
408
 * then will split this string on the character in `lunr.tokenizer.separator`.
409
 * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
410
 *
411
 * Optional metadata can be passed to the tokenizer, this metadata will be cloned and
412
 * added as metadata to every token that is created from the object to be tokenized.
413
 *
414
 * @static
415
 * @param {?(string|object|object[])} obj - The object to convert into tokens
416
 * @param {?object} metadata - Optional metadata to associate with every token
417
 * @returns {lunr.Token[]}
418
 * @see {@link lunr.Pipeline}
419
 */
420
lunr.tokenizer = function (obj, metadata) {
421
  if (obj == null || obj == undefined) {
422
    return []
423
  }
424
 
425
  if (Array.isArray(obj)) {
426
    return obj.map(function (t) {
427
      return new lunr.Token(
428
        lunr.utils.asString(t).toLowerCase(),
429
        lunr.utils.clone(metadata)
430
      )
431
    })
432
  }
433
 
434
  var str = obj.toString().toLowerCase(),
435
      len = str.length,
436
      tokens = []
437
 
438
  for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
439
    var char = str.charAt(sliceEnd),
440
        sliceLength = sliceEnd - sliceStart
441
 
442
    if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
443
 
444
      if (sliceLength > 0) {
445
        var tokenMetadata = lunr.utils.clone(metadata) || {}
446
        tokenMetadata["position"] = [sliceStart, sliceLength]
447
        tokenMetadata["index"] = tokens.length
448
 
449
        tokens.push(
450
          new lunr.Token (
451
            str.slice(sliceStart, sliceEnd),
452
            tokenMetadata
453
          )
454
        )
455
      }
456
 
457
      sliceStart = sliceEnd + 1
458
    }
459
 
460
  }
461
 
462
  return tokens
463
}
464
 
465
/**
466
 * The separator used to split a string into tokens. Override this property to change the behaviour of
467
 * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens.
468
 *
469
 * @static
470
 * @see lunr.tokenizer
471
 */
472
lunr.tokenizer.separator = /[\s\-]+/
473
/*!
474
 * lunr.Pipeline
475
 * Copyright (C) 2020 Oliver Nightingale
476
 */
477
 
478
/**
479
 * lunr.Pipelines maintain an ordered list of functions to be applied to all
480
 * tokens in documents entering the search index and queries being ran against
481
 * the index.
482
 *
483
 * An instance of lunr.Index created with the lunr shortcut will contain a
484
 * pipeline with a stop word filter and an English language stemmer. Extra
485
 * functions can be added before or after either of these functions or these
486
 * default functions can be removed.
487
 *
488
 * When run the pipeline will call each function in turn, passing a token, the
489
 * index of that token in the original list of all tokens and finally a list of
490
 * all the original tokens.
491
 *
492
 * The output of functions in the pipeline will be passed to the next function
493
 * in the pipeline. To exclude a token from entering the index the function
494
 * should return undefined, the rest of the pipeline will not be called with
495
 * this token.
496
 *
497
 * For serialisation of pipelines to work, all functions used in an instance of
498
 * a pipeline should be registered with lunr.Pipeline. Registered functions can
499
 * then be loaded. If trying to load a serialised pipeline that uses functions
500
 * that are not registered an error will be thrown.
501
 *
502
 * If not planning on serialising the pipeline then registering pipeline functions
503
 * is not necessary.
504
 *
505
 * @constructor
506
 */
507
lunr.Pipeline = function () {
508
  this._stack = []
509
}
510
 
511
lunr.Pipeline.registeredFunctions = Object.create(null)
512
 
513
/**
514
 * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
515
 * string as well as all known metadata. A pipeline function can mutate the token string
516
 * or mutate (or add) metadata for a given token.
517
 *
518
 * A pipeline function can indicate that the passed token should be discarded by returning
519
 * null, undefined or an empty string. This token will not be passed to any downstream pipeline
520
 * functions and will not be added to the index.
521
 *
522
 * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
523
 * to any downstream pipeline functions and all will returned tokens will be added to the index.
524
 *
525
 * Any number of pipeline functions may be chained together using a lunr.Pipeline.
526
 *
527
 * @interface lunr.PipelineFunction
528
 * @param {lunr.Token} token - A token from the document being processed.
529
 * @param {number} i - The index of this token in the complete list of tokens for this document/field.
530
 * @param {lunr.Token[]} tokens - All tokens for this document/field.
531
 * @returns {(?lunr.Token|lunr.Token[])}
532
 */
533
 
534
/**
535
 * Register a function with the pipeline.
536
 *
537
 * Functions that are used in the pipeline should be registered if the pipeline
538
 * needs to be serialised, or a serialised pipeline needs to be loaded.
539
 *
540
 * Registering a function does not add it to a pipeline, functions must still be
541
 * added to instances of the pipeline for them to be used when running a pipeline.
542
 *
543
 * @param {lunr.PipelineFunction} fn - The function to check for.
544
 * @param {String} label - The label to register this function with
545
 */
546
lunr.Pipeline.registerFunction = function (fn, label) {
547
  if (label in this.registeredFunctions) {
548
    lunr.utils.warn('Overwriting existing registered function: ' + label)
549
  }
550
 
551
  fn.label = label
552
  lunr.Pipeline.registeredFunctions[fn.label] = fn
553
}
554
 
555
/**
556
 * Warns if the function is not registered as a Pipeline function.
557
 *
558
 * @param {lunr.PipelineFunction} fn - The function to check for.
559
 * @private
560
 */
561
lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
562
  var isRegistered = fn.label && (fn.label in this.registeredFunctions)
563
 
564
  if (!isRegistered) {
565
    lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
566
  }
567
}
568
 
569
/**
570
 * Loads a previously serialised pipeline.
571
 *
572
 * All functions to be loaded must already be registered with lunr.Pipeline.
573
 * If any function from the serialised data has not been registered then an
574
 * error will be thrown.
575
 *
576
 * @param {Object} serialised - The serialised pipeline to load.
577
 * @returns {lunr.Pipeline}
578
 */
579
lunr.Pipeline.load = function (serialised) {
580
  var pipeline = new lunr.Pipeline
581
 
582
  serialised.forEach(function (fnName) {
583
    var fn = lunr.Pipeline.registeredFunctions[fnName]
584
 
585
    if (fn) {
586
      pipeline.add(fn)
587
    } else {
588
      throw new Error('Cannot load unregistered function: ' + fnName)
589
    }
590
  })
591
 
592
  return pipeline
593
}
594
 
595
/**
596
 * Adds new functions to the end of the pipeline.
597
 *
598
 * Logs a warning if the function has not been registered.
599
 *
600
 * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
601
 */
602
lunr.Pipeline.prototype.add = function () {
603
  var fns = Array.prototype.slice.call(arguments)
604
 
605
  fns.forEach(function (fn) {
606
    lunr.Pipeline.warnIfFunctionNotRegistered(fn)
607
    this._stack.push(fn)
608
  }, this)
609
}
610
 
611
/**
612
 * Adds a single function after a function that already exists in the
613
 * pipeline.
614
 *
615
 * Logs a warning if the function has not been registered.
616
 *
617
 * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
618
 * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
619
 */
620
lunr.Pipeline.prototype.after = function (existingFn, newFn) {
621
  lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
622
 
623
  var pos = this._stack.indexOf(existingFn)
624
  if (pos == -1) {
625
    throw new Error('Cannot find existingFn')
626
  }
627
 
628
  pos = pos + 1
629
  this._stack.splice(pos, 0, newFn)
630
}
631
 
632
/**
633
 * Adds a single function before a function that already exists in the
634
 * pipeline.
635
 *
636
 * Logs a warning if the function has not been registered.
637
 *
638
 * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
639
 * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
640
 */
641
lunr.Pipeline.prototype.before = function (existingFn, newFn) {
642
  lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
643
 
644
  var pos = this._stack.indexOf(existingFn)
645
  if (pos == -1) {
646
    throw new Error('Cannot find existingFn')
647
  }
648
 
649
  this._stack.splice(pos, 0, newFn)
650
}
651
 
652
/**
653
 * Removes a function from the pipeline.
654
 *
655
 * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
656
 */
657
lunr.Pipeline.prototype.remove = function (fn) {
658
  var pos = this._stack.indexOf(fn)
659
  if (pos == -1) {
660
    return
661
  }
662
 
663
  this._stack.splice(pos, 1)
664
}
665
 
666
/**
667
 * Runs the current list of functions that make up the pipeline against the
668
 * passed tokens.
669
 *
670
 * @param {Array} tokens The tokens to run through the pipeline.
671
 * @returns {Array}
672
 */
673
lunr.Pipeline.prototype.run = function (tokens) {
674
  var stackLength = this._stack.length
675
 
676
  for (var i = 0; i < stackLength; i++) {
677
    var fn = this._stack[i]
678
    var memo = []
679
 
680
    for (var j = 0; j < tokens.length; j++) {
681
      var result = fn(tokens[j], j, tokens)
682
 
683
      if (result === null || result === void 0 || result === '') continue
684
 
685
      if (Array.isArray(result)) {
686
        for (var k = 0; k < result.length; k++) {
687
          memo.push(result[k])
688
        }
689
      } else {
690
        memo.push(result)
691
      }
692
    }
693
 
694
    tokens = memo
695
  }
696
 
697
  return tokens
698
}
699
 
700
/**
701
 * Convenience method for passing a string through a pipeline and getting
702
 * strings out. This method takes care of wrapping the passed string in a
703
 * token and mapping the resulting tokens back to strings.
704
 *
705
 * @param {string} str - The string to pass through the pipeline.
706
 * @param {?object} metadata - Optional metadata to associate with the token
707
 * passed to the pipeline.
708
 * @returns {string[]}
709
 */
710
lunr.Pipeline.prototype.runString = function (str, metadata) {
711
  var token = new lunr.Token (str, metadata)
712
 
713
  return this.run([token]).map(function (t) {
714
    return t.toString()
715
  })
716
}
717
 
718
/**
719
 * Resets the pipeline by removing any existing processors.
720
 *
721
 */
722
lunr.Pipeline.prototype.reset = function () {
723
  this._stack = []
724
}
725
 
726
/**
727
 * Returns a representation of the pipeline ready for serialisation.
728
 *
729
 * Logs a warning if the function has not been registered.
730
 *
731
 * @returns {Array}
732
 */
733
lunr.Pipeline.prototype.toJSON = function () {
734
  return this._stack.map(function (fn) {
735
    lunr.Pipeline.warnIfFunctionNotRegistered(fn)
736
 
737
    return fn.label
738
  })
739
}
740
/*!
741
 * lunr.Vector
742
 * Copyright (C) 2020 Oliver Nightingale
743
 */
744
 
745
/**
746
 * A vector is used to construct the vector space of documents and queries. These
747
 * vectors support operations to determine the similarity between two documents or
748
 * a document and a query.
749
 *
750
 * Normally no parameters are required for initializing a vector, but in the case of
751
 * loading a previously dumped vector the raw elements can be provided to the constructor.
752
 *
753
 * For performance reasons vectors are implemented with a flat array, where an elements
754
 * index is immediately followed by its value. E.g. [index, value, index, value]. This
755
 * allows the underlying array to be as sparse as possible and still offer decent
756
 * performance when being used for vector calculations.
757
 *
758
 * @constructor
759
 * @param {Number[]} [elements] - The flat list of element index and element value pairs.
760
 */
761
lunr.Vector = function (elements) {
762
  this._magnitude = 0
763
  this.elements = elements || []
764
}
765
 
766
 
767
/**
768
 * Calculates the position within the vector to insert a given index.
769
 *
770
 * This is used internally by insert and upsert. If there are duplicate indexes then
771
 * the position is returned as if the value for that index were to be updated, but it
772
 * is the callers responsibility to check whether there is a duplicate at that index
773
 *
774
 * @param {Number} insertIdx - The index at which the element should be inserted.
775
 * @returns {Number}
776
 */
777
lunr.Vector.prototype.positionForIndex = function (index) {
778
  // For an empty vector the tuple can be inserted at the beginning
779
  if (this.elements.length == 0) {
780
    return 0
781
  }
782
 
783
  var start = 0,
784
      end = this.elements.length / 2,
785
      sliceLength = end - start,
786
      pivotPoint = Math.floor(sliceLength / 2),
787
      pivotIndex = this.elements[pivotPoint * 2]
788
 
789
  while (sliceLength > 1) {
790
    if (pivotIndex < index) {
791
      start = pivotPoint
792
    }
793
 
794
    if (pivotIndex > index) {
795
      end = pivotPoint
796
    }
797
 
798
    if (pivotIndex == index) {
799
      break
800
    }
801
 
802
    sliceLength = end - start
803
    pivotPoint = start + Math.floor(sliceLength / 2)
804
    pivotIndex = this.elements[pivotPoint * 2]
805
  }
806
 
807
  if (pivotIndex == index) {
808
    return pivotPoint * 2
809
  }
810
 
811
  if (pivotIndex > index) {
812
    return pivotPoint * 2
813
  }
814
 
815
  if (pivotIndex < index) {
816
    return (pivotPoint + 1) * 2
817
  }
818
}
819
 
820
/**
821
 * Inserts an element at an index within the vector.
822
 *
823
 * Does not allow duplicates, will throw an error if there is already an entry
824
 * for this index.
825
 *
826
 * @param {Number} insertIdx - The index at which the element should be inserted.
827
 * @param {Number} val - The value to be inserted into the vector.
828
 */
829
lunr.Vector.prototype.insert = function (insertIdx, val) {
830
  this.upsert(insertIdx, val, function () {
831
    throw "duplicate index"
832
  })
833
}
834
 
835
/**
836
 * Inserts or updates an existing index within the vector.
837
 *
838
 * @param {Number} insertIdx - The index at which the element should be inserted.
839
 * @param {Number} val - The value to be inserted into the vector.
840
 * @param {function} fn - A function that is called for updates, the existing value and the
841
 * requested value are passed as arguments
842
 */
843
lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
844
  this._magnitude = 0
845
  var position = this.positionForIndex(insertIdx)
846
 
847
  if (this.elements[position] == insertIdx) {
848
    this.elements[position + 1] = fn(this.elements[position + 1], val)
849
  } else {
850
    this.elements.splice(position, 0, insertIdx, val)
851
  }
852
}
853
 
854
/**
855
 * Calculates the magnitude of this vector.
856
 *
857
 * @returns {Number}
858
 */
859
lunr.Vector.prototype.magnitude = function () {
860
  if (this._magnitude) return this._magnitude
861
 
862
  var sumOfSquares = 0,
863
      elementsLength = this.elements.length
864
 
865
  for (var i = 1; i < elementsLength; i += 2) {
866
    var val = this.elements[i]
867
    sumOfSquares += val * val
868
  }
869
 
870
  return this._magnitude = Math.sqrt(sumOfSquares)
871
}
872
 
873
/**
874
 * Calculates the dot product of this vector and another vector.
875
 *
876
 * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
877
 * @returns {Number}
878
 */
879
lunr.Vector.prototype.dot = function (otherVector) {
880
  var dotProduct = 0,
881
      a = this.elements, b = otherVector.elements,
882
      aLen = a.length, bLen = b.length,
883
      aVal = 0, bVal = 0,
884
      i = 0, j = 0
885
 
886
  while (i < aLen && j < bLen) {
887
    aVal = a[i], bVal = b[j]
888
    if (aVal < bVal) {
889
      i += 2
890
    } else if (aVal > bVal) {
891
      j += 2
892
    } else if (aVal == bVal) {
893
      dotProduct += a[i + 1] * b[j + 1]
894
      i += 2
895
      j += 2
896
    }
897
  }
898
 
899
  return dotProduct
900
}
901
 
902
/**
903
 * Calculates the similarity between this vector and another vector.
904
 *
905
 * @param {lunr.Vector} otherVector - The other vector to calculate the
906
 * similarity with.
907
 * @returns {Number}
908
 */
909
lunr.Vector.prototype.similarity = function (otherVector) {
910
  return this.dot(otherVector) / this.magnitude() || 0
911
}
912
 
913
/**
914
 * Converts the vector to an array of the elements within the vector.
915
 *
916
 * @returns {Number[]}
917
 */
918
lunr.Vector.prototype.toArray = function () {
919
  var output = new Array (this.elements.length / 2)
920
 
921
  for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) {
922
    output[j] = this.elements[i]
923
  }
924
 
925
  return output
926
}
927
 
928
/**
929
 * A JSON serializable representation of the vector.
930
 *
931
 * @returns {Number[]}
932
 */
933
lunr.Vector.prototype.toJSON = function () {
934
  return this.elements
935
}
936
/* eslint-disable */
937
/*!
938
 * lunr.stemmer
939
 * Copyright (C) 2020 Oliver Nightingale
940
 * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
941
 */
942
 
943
/**
944
 * lunr.stemmer is an english language stemmer, this is a JavaScript
945
 * implementation of the PorterStemmer taken from http://tartarus.org/~martin
946
 *
947
 * @static
948
 * @implements {lunr.PipelineFunction}
949
 * @param {lunr.Token} token - The string to stem
950
 * @returns {lunr.Token}
951
 * @see {@link lunr.Pipeline}
952
 * @function
953
 */
954
lunr.stemmer = (function(){
955
  var step2list = {
956
      "ational" : "ate",
957
      "tional" : "tion",
958
      "enci" : "ence",
959
      "anci" : "ance",
960
      "izer" : "ize",
961
      "bli" : "ble",
962
      "alli" : "al",
963
      "entli" : "ent",
964
      "eli" : "e",
965
      "ousli" : "ous",
966
      "ization" : "ize",
967
      "ation" : "ate",
968
      "ator" : "ate",
969
      "alism" : "al",
970
      "iveness" : "ive",
971
      "fulness" : "ful",
972
      "ousness" : "ous",
973
      "aliti" : "al",
974
      "iviti" : "ive",
975
      "biliti" : "ble",
976
      "logi" : "log"
977
    },
978
 
979
    step3list = {
980
      "icate" : "ic",
981
      "ative" : "",
982
      "alize" : "al",
983
      "iciti" : "ic",
984
      "ical" : "ic",
985
      "ful" : "",
986
      "ness" : ""
987
    },
988
 
989
    c = "[^aeiou]",          // consonant
990
    v = "[aeiouy]",          // vowel
991
    C = c + "[^aeiouy]*",    // consonant sequence
992
    V = v + "[aeiou]*",      // vowel sequence
993
 
994
    mgr0 = "^(" + C + ")?" + V + C,               // [C]VC... is m>0
995
    meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$",  // [C]VC[V] is m=1
996
    mgr1 = "^(" + C + ")?" + V + C + V + C,       // [C]VCVC... is m>1
997
    s_v = "^(" + C + ")?" + v;                   // vowel in stem
998
 
999
  var re_mgr0 = new RegExp(mgr0);
1000
  var re_mgr1 = new RegExp(mgr1);
1001
  var re_meq1 = new RegExp(meq1);
1002
  var re_s_v = new RegExp(s_v);
1003
 
1004
  var re_1a = /^(.+?)(ss|i)es$/;
1005
  var re2_1a = /^(.+?)([^s])s$/;
1006
  var re_1b = /^(.+?)eed$/;
1007
  var re2_1b = /^(.+?)(ed|ing)$/;
1008
  var re_1b_2 = /.$/;
1009
  var re2_1b_2 = /(at|bl|iz)$/;
1010
  var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
1011
  var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");
1012
 
1013
  var re_1c = /^(.+?[^aeiou])y$/;
1014
  var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
1015
 
1016
  var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
1017
 
1018
  var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
1019
  var re2_4 = /^(.+?)(s|t)(ion)$/;
1020
 
1021
  var re_5 = /^(.+?)e$/;
1022
  var re_5_1 = /ll$/;
1023
  var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");
1024
 
1025
  var porterStemmer = function porterStemmer(w) {
1026
    var stem,
1027
      suffix,
1028
      firstch,
1029
      re,
1030
      re2,
1031
      re3,
1032
      re4;
1033
 
1034
    if (w.length < 3) { return w; }
1035
 
1036
    firstch = w.substr(0,1);
1037
    if (firstch == "y") {
1038
      w = firstch.toUpperCase() + w.substr(1);
1039
    }
1040
 
1041
    // Step 1a
1042
    re = re_1a
1043
    re2 = re2_1a;
1044
 
1045
    if (re.test(w)) { w = w.replace(re,"$1$2"); }
1046
    else if (re2.test(w)) { w = w.replace(re2,"$1$2"); }
1047
 
1048
    // Step 1b
1049
    re = re_1b;
1050
    re2 = re2_1b;
1051
    if (re.test(w)) {
1052
      var fp = re.exec(w);
1053
      re = re_mgr0;
1054
      if (re.test(fp[1])) {
1055
        re = re_1b_2;
1056
        w = w.replace(re,"");
1057
      }
1058
    } else if (re2.test(w)) {
1059
      var fp = re2.exec(w);
1060
      stem = fp[1];
1061
      re2 = re_s_v;
1062
      if (re2.test(stem)) {
1063
        w = stem;
1064
        re2 = re2_1b_2;
1065
        re3 = re3_1b_2;
1066
        re4 = re4_1b_2;
1067
        if (re2.test(w)) { w = w + "e"; }
1068
        else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); }
1069
        else if (re4.test(w)) { w = w + "e"; }
1070
      }
1071
    }
1072
 
1073
    // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
1074
    re = re_1c;
1075
    if (re.test(w)) {
1076
      var fp = re.exec(w);
1077
      stem = fp[1];
1078
      w = stem + "i";
1079
    }
1080
 
1081
    // Step 2
1082
    re = re_2;
1083
    if (re.test(w)) {
1084
      var fp = re.exec(w);
1085
      stem = fp[1];
1086
      suffix = fp[2];
1087
      re = re_mgr0;
1088
      if (re.test(stem)) {
1089
        w = stem + step2list[suffix];
1090
      }
1091
    }
1092
 
1093
    // Step 3
1094
    re = re_3;
1095
    if (re.test(w)) {
1096
      var fp = re.exec(w);
1097
      stem = fp[1];
1098
      suffix = fp[2];
1099
      re = re_mgr0;
1100
      if (re.test(stem)) {
1101
        w = stem + step3list[suffix];
1102
      }
1103
    }
1104
 
1105
    // Step 4
1106
    re = re_4;
1107
    re2 = re2_4;
1108
    if (re.test(w)) {
1109
      var fp = re.exec(w);
1110
      stem = fp[1];
1111
      re = re_mgr1;
1112
      if (re.test(stem)) {
1113
        w = stem;
1114
      }
1115
    } else if (re2.test(w)) {
1116
      var fp = re2.exec(w);
1117
      stem = fp[1] + fp[2];
1118
      re2 = re_mgr1;
1119
      if (re2.test(stem)) {
1120
        w = stem;
1121
      }
1122
    }
1123
 
1124
    // Step 5
1125
    re = re_5;
1126
    if (re.test(w)) {
1127
      var fp = re.exec(w);
1128
      stem = fp[1];
1129
      re = re_mgr1;
1130
      re2 = re_meq1;
1131
      re3 = re3_5;
1132
      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
1133
        w = stem;
1134
      }
1135
    }
1136
 
1137
    re = re_5_1;
1138
    re2 = re_mgr1;
1139
    if (re.test(w) && re2.test(w)) {
1140
      re = re_1b_2;
1141
      w = w.replace(re,"");
1142
    }
1143
 
1144
    // and turn initial Y back to y
1145
 
1146
    if (firstch == "y") {
1147
      w = firstch.toLowerCase() + w.substr(1);
1148
    }
1149
 
1150
    return w;
1151
  };
1152
 
1153
  return function (token) {
1154
    return token.update(porterStemmer);
1155
  }
1156
})();
1157
 
1158
// Register the stemmer in the pipeline function registry under the
// label 'stemmer' so it can be referenced by name (e.g. when an index
// is serialised and later re-loaded).
lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
1159
/*!
1160
 * lunr.stopWordFilter
1161
 * Copyright (C) 2020 Oliver Nightingale
1162
 */
1163
 
1164
/**
1165
 * lunr.generateStopWordFilter builds a stopWordFilter function from the provided
1166
 * list of stop words.
1167
 *
1168
 * The built in lunr.stopWordFilter is built using this generator and can be used
1169
 * to generate custom stopWordFilters for applications or non English languages.
1170
 *
1171
 * @function
1172
 * @param {Array} stopWords The list of stop words to exclude from the index
1173
 * @returns {lunr.PipelineFunction}
1174
 * @see lunr.Pipeline
1175
 * @see lunr.stopWordFilter
1176
 */
1177
lunr.generateStopWordFilter = function (stopWords) {
  // Build a lookup table mapping each stop word to itself; membership
  // is then a single property comparison per token.
  var lookup = {}

  for (var i = 0; i < stopWords.length; i++) {
    lookup[stopWords[i]] = stopWords[i]
  }

  // The returned pipeline function yields the token unchanged unless it
  // is a stop word, in which case it returns undefined (dropping it).
  return function (token) {
    if (!token) return

    var str = token.toString()
    if (lookup[str] !== str) return token
  }
}
1187
 
1188
/**
1189
 * lunr.stopWordFilter is an English language stop word list filter, any words
1190
 * contained in the list will not be passed through the filter.
1191
 *
1192
 * This is intended to be used in the Pipeline. If the token does not pass the
1193
 * filter then undefined will be returned.
1194
 *
1195
 * @function
1196
 * @implements {lunr.PipelineFunction}
1197
 * @params {lunr.Token} token - A token to check for being a stop word.
1198
 * @returns {lunr.Token}
1199
 * @see {@link lunr.Pipeline}
1200
 */
1201
// The default English stop word list, fed through the stop word filter
// generator above. Each entry is dropped from documents and queries.
lunr.stopWordFilter = lunr.generateStopWordFilter([
  'a', 'able', 'about', 'across', 'after', 'all', 'almost', 'also', 'am', 'among',
  'an', 'and', 'any', 'are', 'as', 'at', 'be', 'because', 'been', 'but',
  'by', 'can', 'cannot', 'could', 'dear', 'did', 'do', 'does', 'either', 'else',
  'ever', 'every', 'for', 'from', 'get', 'got', 'had', 'has', 'have', 'he',
  'her', 'hers', 'him', 'his', 'how', 'however', 'i', 'if', 'in', 'into',
  'is', 'it', 'its', 'just', 'least', 'let', 'like', 'likely', 'may', 'me',
  'might', 'most', 'must', 'my', 'neither', 'no', 'nor', 'not', 'of', 'off',
  'often', 'on', 'only', 'or', 'other', 'our', 'own', 'rather', 'said', 'say',
  'says', 'she', 'should', 'since', 'so', 'some', 'than', 'that', 'the', 'their',
  'them', 'then', 'there', 'these', 'they', 'this', 'tis', 'to', 'too', 'twas',
  'us', 'wants', 'was', 'we', 'were', 'what', 'when', 'where', 'which', 'while',
  'who', 'whom', 'why', 'will', 'with', 'would', 'yet', 'you', 'your'
])
1322
 
1323
// Register the stop word filter in the pipeline function registry under
// the label 'stopWordFilter' so serialised pipelines can resolve it by name.
lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
1324
/*!
1325
 * lunr.trimmer
1326
 * Copyright (C) 2020 Oliver Nightingale
1327
 */
1328
 
1329
/**
1330
 * lunr.trimmer is a pipeline function for trimming non word
1331
 * characters from the beginning and end of tokens before they
1332
 * enter the index.
1333
 *
1334
 * This implementation may not work correctly for non latin
1335
 * characters and should either be removed or adapted for use
1336
 * with languages with non-latin characters.
1337
 *
1338
 * @static
1339
 * @implements {lunr.PipelineFunction}
1340
 * @param {lunr.Token} token The token to pass through the filter
1341
 * @returns {lunr.Token}
1342
 * @see lunr.Pipeline
1343
 */
1344
lunr.trimmer = function (token) {
  // Strip runs of non-word (\W) characters from both ends of the token;
  // characters inside the token are left untouched.
  var stripNonWord = function (str) {
    return str.replace(/^\W+/, '').replace(/\W+$/, '')
  }

  return token.update(stripNonWord)
}
1349
 
1350
// Register the trimmer in the pipeline function registry under the
// label 'trimmer' so serialised pipelines can resolve it by name.
lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
1351
/*!
1352
 * lunr.TokenSet
1353
 * Copyright (C) 2020 Oliver Nightingale
1354
 */
1355
 
1356
/**
1357
 * A token set is used to store the unique list of all tokens
1358
 * within an index. Token sets are also used to represent an
1359
 * incoming query to the index, this query token set and index
1360
 * token set are then intersected to find which tokens to look
1361
 * up in the inverted index.
1362
 *
1363
 * A token set can hold multiple tokens, as in the case of the
1364
 * index token set, or it can hold a single token as in the
1365
 * case of a simple query token set.
1366
 *
1367
 * Additionally token sets are used to perform wildcard matching.
1368
 * Leading, contained and trailing wildcards are supported, and
1369
 * from this edit distance matching can also be provided.
1370
 *
1371
 * Token sets are implemented as a minimal finite state automata,
1372
 * where both common prefixes and suffixes are shared between tokens.
1373
 * This helps to reduce the space used for storing the token set.
1374
 *
1375
 * @constructor
1376
 */
1377
lunr.TokenSet = function () {
  // Claim the next auto-incrementing id; unique ids are required for
  // minimisation (see lunr.TokenSet._nextId).
  this.id = lunr.TokenSet._nextId
  lunr.TokenSet._nextId += 1

  // A final node marks the end of a complete token; edges map a single
  // character (or '*') to the next node in the automaton.
  this.final = false
  this.edges = {}
}
1383
 
1384
/**
1385
 * Keeps track of the next, auto increment, identifier to assign
1386
 * to a new tokenSet.
1387
 *
1388
 * TokenSets require a unique identifier to be correctly minimised.
1389
 *
1390
 * @private
1391
 */
1392
lunr.TokenSet._nextId = 1 // ids start at 1; incremented by the TokenSet constructor
1393
 
1394
/**
1395
 * Creates a TokenSet instance from the given sorted array of words.
1396
 *
1397
 * @param {String[]} arr - A sorted array of strings to create the set from.
1398
 * @returns {lunr.TokenSet}
1399
 * @throws Will throw an error if the input array is not sorted.
1400
 */
1401
lunr.TokenSet.fromArray = function (arr) {
  // Feed every word (in the given, already-sorted order) through a
  // TokenSet.Builder, then return the minimised automaton's root.
  var setBuilder = new lunr.TokenSet.Builder

  arr.forEach(function (word) {
    setBuilder.insert(word)
  })

  setBuilder.finish()
  return setBuilder.root
}
1411
 
1412
/**
1413
 * Creates a token set from a query clause.
1414
 *
1415
 * @private
1416
 * @param {Object} clause - A single clause from lunr.Query.
1417
 * @param {string} clause.term - The query clause term.
1418
 * @param {number} [clause.editDistance] - The optional edit distance for the term.
1419
 * @returns {lunr.TokenSet}
1420
 */
1421
lunr.TokenSet.fromClause = function (clause) {
  // A clause that carries an editDistance requests fuzzy matching;
  // anything else is an exact (possibly wildcarded) term.
  var isFuzzy = 'editDistance' in clause

  return isFuzzy
    ? lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
    : lunr.TokenSet.fromString(clause.term)
}
1428
 
1429
/**
1430
 * Creates a token set representing a single string with a specified
1431
 * edit distance.
1432
 *
1433
 * Insertions, deletions, substitutions and transpositions are each
1434
 * treated as an edit distance of 1.
1435
 *
1436
 * Increasing the allowed edit distance will have a dramatic impact
1437
 * on the performance of both creating and intersecting these TokenSets.
1438
 * It is advised to keep the edit distance less than 3.
1439
 *
1440
 * @param {string} str - The string to create the token set from.
1441
 * @param {number} editDistance - The allowed edit distance to match.
1442
 * @returns {lunr.TokenSet}
1443
 */
1444
lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
  var root = new lunr.TokenSet

  // Depth-first expansion of (node, remaining edit budget, remaining
  // string) states; each state consumes characters from str while
  // spending at most editDistance edits in total.
  var stack = [{
    node: root,
    editsRemaining: editDistance,
    str: str
  }]

  while (stack.length) {
    var frame = stack.pop()

    // no edit - consume the next character along a concrete edge
    if (frame.str.length > 0) {
      var char = frame.str.charAt(0),
          noEditNode

      if (char in frame.node.edges) {
        noEditNode = frame.node.edges[char]
      } else {
        noEditNode = new lunr.TokenSet
        frame.node.edges[char] = noEditNode
      }

      if (frame.str.length == 1) {
        noEditNode.final = true
      }

      stack.push({
        node: noEditNode,
        editsRemaining: frame.editsRemaining,
        str: frame.str.slice(1)
      })
    }

    // every branch below costs one edit, so stop expanding this state
    // when its budget is exhausted
    if (frame.editsRemaining == 0) {
      continue
    }

    // insertion - a '*' edge matches any single extra character; the
    // wildcard edge is shared with the substitution case below
    if ("*" in frame.node.edges) {
      var insertionNode = frame.node.edges["*"]
    } else {
      var insertionNode = new lunr.TokenSet
      frame.node.edges["*"] = insertionNode
    }

    if (frame.str.length == 0) {
      insertionNode.final = true
    }

    // note: str is NOT consumed here - the inserted character is extra
    stack.push({
      node: insertionNode,
      editsRemaining: frame.editsRemaining - 1,
      str: frame.str
    })

    // deletion
    // can only do a deletion if we have enough edits remaining
    // and if there are characters left to delete in the string
    if (frame.str.length > 1) {
      stack.push({
        node: frame.node,
        editsRemaining: frame.editsRemaining - 1,
        str: frame.str.slice(1)
      })
    }

    // deletion
    // just removing the last character from the str
    if (frame.str.length == 1) {
      frame.node.final = true
    }

    // substitution
    // can only do a substitution if we have enough edits remaining
    // and if there are characters left to substitute
    if (frame.str.length >= 1) {
      if ("*" in frame.node.edges) {
        var substitutionNode = frame.node.edges["*"]
      } else {
        var substitutionNode = new lunr.TokenSet
        frame.node.edges["*"] = substitutionNode
      }

      if (frame.str.length == 1) {
        substitutionNode.final = true
      }

      // unlike insertion, substitution consumes a character from str
      stack.push({
        node: substitutionNode,
        editsRemaining: frame.editsRemaining - 1,
        str: frame.str.slice(1)
      })
    }

    // transposition
    // can only do a transposition if there are edits remaining
    // and there are enough characters to transpose
    if (frame.str.length > 1) {
      var charA = frame.str.charAt(0),
          charB = frame.str.charAt(1),
          transposeNode

      if (charB in frame.node.edges) {
        transposeNode = frame.node.edges[charB]
      } else {
        transposeNode = new lunr.TokenSet
        frame.node.edges[charB] = transposeNode
      }

      // NOTE(review): unreachable - this branch is guarded by
      // frame.str.length > 1 above, so length can never be 1 here
      if (frame.str.length == 1) {
        transposeNode.final = true
      }

      // swap the first two characters and continue matching
      stack.push({
        node: transposeNode,
        editsRemaining: frame.editsRemaining - 1,
        str: charA + frame.str.slice(2)
      })
    }
  }

  return root
}
1569
 
1570
/**
1571
 * Creates a TokenSet from a string.
1572
 *
1573
 * The string may contain one or more wildcard characters (*)
1574
 * that will allow wildcard matching when intersecting with
1575
 * another TokenSet.
1576
 *
1577
 * @param {string} str - The string to create a TokenSet from.
1578
 * @returns {lunr.TokenSet}
1579
 */
1580
lunr.TokenSet.fromString = function (str) {
  var root = new lunr.TokenSet
  var current = root

  /*
   * Append one node per character of the string. A wildcard character
   * ('*') instead adds a self-referencing edge, so the node keeps
   * matching any number of any characters.
   */
  for (var i = 0; i < str.length; i++) {
    var c = str.charAt(i)
    var isLast = (i === str.length - 1)

    if (c === "*") {
      current.edges[c] = current
      current.final = isLast
    } else {
      var child = new lunr.TokenSet
      child.final = isLast

      current.edges[c] = child
      current = child
    }
  }

  return root
}
1611
 
1612
/**
1613
 * Converts this TokenSet into an array of strings
1614
 * contained within the TokenSet.
1615
 *
1616
 * This is not intended to be used on a TokenSet that
1617
 * contains wildcards, in these cases the results are
1618
 * undefined and are likely to cause an infinite loop.
1619
 *
1620
 * @returns {string[]}
1621
 */
1622
lunr.TokenSet.prototype.toArray = function () {
  var collected = []

  // Depth-first traversal of the automaton, accumulating the prefix
  // built so far alongside each visited node.
  var pending = [{
    prefix: "",
    node: this
  }]

  while (pending.length) {
    var entry = pending.pop()

    if (entry.node.final) {
      /* In Safari, at this point the prefix is sometimes corrupted, see:
       * https://github.com/olivernn/lunr.js/issues/279 Calling any
       * String.prototype method forces Safari to "cast" this string to what
       * it's supposed to be, fixing the bug. */
      entry.prefix.charAt(0)
      collected.push(entry.prefix)
    }

    Object.keys(entry.node.edges).forEach(function (label) {
      pending.push({
        prefix: entry.prefix.concat(label),
        node: entry.node.edges[label]
      })
    })
  }

  return collected
}
1656
 
1657
/**
1658
 * Generates a string representation of a TokenSet.
1659
 *
1660
 * This is intended to allow TokenSets to be used as keys
1661
 * in objects, largely to aid the construction and minimisation
1662
 * of a TokenSet. As such it is not designed to be a human
1663
 * friendly representation of the TokenSet.
1664
 *
1665
 * @returns {string}
1666
 */
1667
lunr.TokenSet.prototype.toString = function () {
  // The builder caches this node's key in _str once the node can no
  // longer change (see lunr.TokenSet.Builder#minimize).
  if (this._str) {
    return this._str
  }

  // Key format: finality bit followed by each (sorted) edge label and
  // the id of the node it points to. Two nodes with equal keys are
  // interchangeable, which is what minimisation relies on.
  //
  // Object.keys plus an index loop is used instead of for-in because
  // this.edges tends to accumulate many keys; from simple benchmarks
  // the performance is comparable, and it keeps the function easy for
  // V8 to optimise.
  var parts = [this.final ? '1' : '0']
  var labels = Object.keys(this.edges).sort()

  for (var i = 0; i < labels.length; i++) {
    parts.push(labels[i])
    parts.push(this.edges[labels[i]].id)
  }

  return parts.join('')
}
1693
 
1694
/**
1695
 * Returns a new TokenSet that is the intersection of
1696
 * this TokenSet and the passed TokenSet.
1697
 *
1698
 * This intersection will take into account any wildcards
1699
 * contained within the TokenSet.
1700
 *
1701
 * @param {lunr.TokenSet} b - Another TokenSet to intersect with.
1702
 * @returns {lunr.TokenSet}
1703
 */
1704
lunr.TokenSet.prototype.intersect = function (b) {
  // Product construction: walk this automaton and the query automaton
  // in lock-step, emitting an output node for every pair of
  // compatible edges.
  var output = new lunr.TokenSet,
      frame = undefined

  var stack = [{
    qNode: b,
    output: output,
    node: this
  }]

  while (stack.length) {
    frame = stack.pop()

    // NOTE: As with the #toString method, we are using
    // Object.keys and a for loop instead of a for-in loop
    // as both of these objects enter 'hash' mode, causing
    // the function to be de-optimised in V8
    var qEdges = Object.keys(frame.qNode.edges),
        qLen = qEdges.length,
        nEdges = Object.keys(frame.node.edges),
        nLen = nEdges.length

    for (var q = 0; q < qLen; q++) {
      var qEdge = qEdges[q]

      for (var n = 0; n < nLen; n++) {
        var nEdge = nEdges[n]

        // a query wildcard ('*') may follow any index edge; otherwise
        // the edge labels must match exactly
        if (nEdge == qEdge || qEdge == '*') {
          var node = frame.node.edges[nEdge],
              qNode = frame.qNode.edges[qEdge],
              final = node.final && qNode.final,
              next = undefined

          if (nEdge in frame.output.edges) {
            // an edge already exists for this character
            // no need to create a new node, just set the finality
            // bit unless this node is already final
            next = frame.output.edges[nEdge]
            next.final = next.final || final

          } else {
            // no edge exists yet, must create one
            // set the finality bit and insert it
            // into the output
            next = new lunr.TokenSet
            next.final = final
            frame.output.edges[nEdge] = next
          }

          stack.push({
            qNode: qNode,
            output: next,
            node: node
          })
        }
      }
    }
  }

  return output
}
1766
lunr.TokenSet.Builder = function () {
1767
  this.previousWord = ""
1768
  this.root = new lunr.TokenSet
1769
  this.uncheckedNodes = []
1770
  this.minimizedNodes = {}
1771
}
1772
 
1773
/**
 * Adds a word to the automaton under construction.
 *
 * @param {string} word - The word to insert; words MUST arrive in
 * sorted order so shared prefixes/suffixes can be minimised.
 * @throws Will throw an error if the word sorts before the previously
 * inserted word.
 */
lunr.TokenSet.Builder.prototype.insert = function (word) {
  var node,
      commonPrefix = 0

  if (word < this.previousWord) {
    throw new Error ("Out of order word insertion")
  }

  // length of the prefix shared with the previously inserted word
  for (var i = 0; i < word.length && i < this.previousWord.length; i++) {
    if (word[i] != this.previousWord[i]) break
    commonPrefix++
  }

  // nodes beyond the common prefix can no longer change; fold them
  // into the minimised set before branching off
  this.minimize(commonPrefix)

  // resume from the deepest surviving node of the previous insertion,
  // or from the root when nothing is left unchecked
  if (this.uncheckedNodes.length == 0) {
    node = this.root
  } else {
    node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
  }

  // append one new node per remaining character of the word
  for (var i = commonPrefix; i < word.length; i++) {
    var nextNode = new lunr.TokenSet,
        char = word[i]

    node.edges[char] = nextNode

    this.uncheckedNodes.push({
      parent: node,
      char: char,
      child: nextNode
    })

    node = nextNode
  }

  node.final = true
  this.previousWord = word
}
1812
 
1813
lunr.TokenSet.Builder.prototype.finish = function () {
1814
  this.minimize(0)
1815
}
1816
 
1817
/**
 * Walks the unchecked path bottom-up, replacing each child with an
 * already-minimised equivalent node where one exists.
 *
 * @param {number} downTo - Index into uncheckedNodes below which nodes
 * are left alone (they may still change on a future insert).
 */
lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
  for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
    var node = this.uncheckedNodes[i],
        childKey = node.child.toString()

    if (childKey in this.minimizedNodes) {
      // an equivalent node already exists; re-point the parent's edge
      // at the canonical instance so the suffix is shared
      node.parent.edges[node.char] = this.minimizedNodes[childKey]
    } else {
      // Cache the key for this node since
      // we know it can't change anymore
      node.child._str = childKey

      this.minimizedNodes[childKey] = node.child
    }

    this.uncheckedNodes.pop()
  }
}
1835
/*!
1836
 * lunr.Index
1837
 * Copyright (C) 2020 Oliver Nightingale
1838
 */
1839
 
1840
/**
1841
 * An index contains the built index of all documents and provides a query interface
1842
 * to the index.
1843
 *
1844
 * Usually instances of lunr.Index will not be created using this constructor, instead
1845
 * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be
1846
 * used to load previously built and serialized indexes.
1847
 *
1848
 * @constructor
1849
 * @param {Object} attrs - The attributes of the built search index.
1850
 * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
1851
 * @param {Object<string, lunr.Vector>} attrs.fieldVectors - Field vectors
1852
 * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens.
1853
 * @param {string[]} attrs.fields - The names of indexed document fields.
1854
 * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
1855
 */
1856
lunr.Index = function (attrs) {
1857
  this.invertedIndex = attrs.invertedIndex
1858
  this.fieldVectors = attrs.fieldVectors
1859
  this.tokenSet = attrs.tokenSet
1860
  this.fields = attrs.fields
1861
  this.pipeline = attrs.pipeline
1862
}
1863
 
1864
/**
1865
 * A result contains details of a document matching a search query.
1866
 * @typedef {Object} lunr.Index~Result
1867
 * @property {string} ref - The reference of the document this result represents.
1868
 * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.
1869
 * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
1870
 */
1871
 
1872
/**
1873
 * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
1874
 * query language which itself is parsed into an instance of lunr.Query.
1875
 *
1876
 * For programmatically building queries it is advised to directly use lunr.Query, the query language
1877
 * is best used for human entered text rather than program generated text.
1878
 *
1879
 * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported
1880
 * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello'
1881
 * or 'world', though those that contain both will rank higher in the results.
1882
 *
1883
 * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
1884
 * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
1885
 * wildcards will increase the number of documents that will be found but can also have a negative
1886
 * impact on query performance, especially with wildcards at the beginning of a term.
1887
 *
1888
 * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
1889
 * hello in the title field will match this query. Using a field not present in the index will lead
1890
 * to an error being thrown.
1891
 *
1892
 * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term
1893
 * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
1894
 * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2.
1895
 * Avoid large values for edit distance to improve query performance.
1896
 *
1897
 * Each term also supports a presence modifier. By default a term's presence in document is optional, however
1898
 * this can be changed to either required or prohibited. For a term's presence to be required in a document the
1899
 * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and
1900
 * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not
1901
 * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'.
1902
 *
1903
 * To escape special characters the backslash character '\' can be used, this allows searches to include
1904
 * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
1905
 * of attempting to apply a boost of 2 to the search term "foo".
1906
 *
1907
 * @typedef {string} lunr.Index~QueryString
1908
 * @example <caption>Simple single term query</caption>
1909
 * hello
1910
 * @example <caption>Multiple term query</caption>
1911
 * hello world
1912
 * @example <caption>term scoped to a field</caption>
1913
 * title:hello
1914
 * @example <caption>term with a boost of 10</caption>
1915
 * hello^10
1916
 * @example <caption>term with an edit distance of 2</caption>
1917
 * hello~2
1918
 * @example <caption>terms with presence modifiers</caption>
1919
 * -foo +bar baz
1920
 */
1921
 
1922
/**
1923
 * Performs a search against the index using lunr query syntax.
1924
 *
1925
 * Results will be returned sorted by their score, the most relevant results
1926
 * will be returned first.  For details on how the score is calculated, please see
1927
 * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}.
1928
 *
1929
 * For more programmatic querying use lunr.Index#query.
1930
 *
1931
 * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
1932
 * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
1933
 * @returns {lunr.Index~Result[]}
1934
 */
1935
lunr.Index.prototype.search = function (queryString) {
  // Parse the human-entered query string into the yielded query
  // object, then let the programmatic #query interface do the work.
  return this.query(function (query) {
    new lunr.QueryParser(queryString, query).parse()
  })
}
1941
 
1942
/**
1943
 * A query builder callback provides a query object to be used to express
1944
 * the query to perform on the index.
1945
 *
1946
 * @callback lunr.Index~queryBuilder
1947
 * @param {lunr.Query} query - The query object to build up.
1948
 * @this lunr.Query
1949
 */
1950
 
1951
/**
1952
 * Performs a query against the index using the yielded lunr.Query object.
1953
 *
1954
 * If performing programmatic queries against the index, this method is preferred
1955
 * over lunr.Index#search so as to avoid the additional query parsing overhead.
1956
 *
1957
 * A query object is yielded to the supplied function which should be used to
1958
 * express the query to be run against the index.
1959
 *
1960
 * Note that although this function takes a callback parameter it is _not_ an
1961
 * asynchronous operation, the callback is just yielded a query object to be
1962
 * customized.
1963
 *
1964
 * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
1965
 * @returns {lunr.Index~Result[]}
1966
 */
1967
lunr.Index.prototype.query = function (fn) {
1968
  // for each query clause
1969
  // * process terms
1970
  // * expand terms from token set
1971
  // * find matching documents and metadata
1972
  // * get document vectors
1973
  // * score documents
1974
 
1975
  var query = new lunr.Query(this.fields),
1976
      matchingFields = Object.create(null),
1977
      queryVectors = Object.create(null),
1978
      termFieldCache = Object.create(null),
1979
      requiredMatches = Object.create(null),
1980
      prohibitedMatches = Object.create(null)
1981
 
1982
  /*
1983
   * To support field level boosts a query vector is created per
1984
   * field. An empty vector is eagerly created to support negated
1985
   * queries.
1986
   */
1987
  for (var i = 0; i < this.fields.length; i++) {
1988
    queryVectors[this.fields[i]] = new lunr.Vector
1989
  }
1990
 
1991
  fn.call(query, query)
1992
 
1993
  for (var i = 0; i < query.clauses.length; i++) {
1994
    /*
1995
     * Unless the pipeline has been disabled for this term, which is
1996
     * the case for terms with wildcards, we need to pass the clause
1997
     * term through the search pipeline. A pipeline returns an array
1998
     * of processed terms. Pipeline functions may expand the passed
1999
     * term, which means we may end up performing multiple index lookups
2000
     * for a single query term.
2001
     */
2002
    var clause = query.clauses[i],
2003
        terms = null,
2004
        clauseMatches = lunr.Set.empty
2005
 
2006
    if (clause.usePipeline) {
2007
      terms = this.pipeline.runString(clause.term, {
2008
        fields: clause.fields
2009
      })
2010
    } else {
2011
      terms = [clause.term]
2012
    }
2013
 
2014
    for (var m = 0; m < terms.length; m++) {
2015
      var term = terms[m]
2016
 
2017
      /*
2018
       * Each term returned from the pipeline needs to use the same query
2019
       * clause object, e.g. the same boost and or edit distance. The
2020
       * simplest way to do this is to re-use the clause object but mutate
2021
       * its term property.
2022
       */
2023
      clause.term = term
2024
 
2025
      /*
2026
       * From the term in the clause we create a token set which will then
2027
       * be used to intersect the indexes token set to get a list of terms
2028
       * to lookup in the inverted index
2029
       */
2030
      var termTokenSet = lunr.TokenSet.fromClause(clause),
2031
          expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()
2032
 
2033
      /*
2034
       * If a term marked as required does not exist in the tokenSet it is
2035
       * impossible for the search to return any matches. We set all the field
2036
       * scoped required matches set to empty and stop examining any further
2037
       * clauses.
2038
       */
2039
      if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) {
2040
        for (var k = 0; k < clause.fields.length; k++) {
2041
          var field = clause.fields[k]
2042
          requiredMatches[field] = lunr.Set.empty
2043
        }
2044
 
2045
        break
2046
      }
2047
 
2048
      for (var j = 0; j < expandedTerms.length; j++) {
2049
        /*
2050
         * For each term get the posting and termIndex, this is required for
2051
         * building the query vector.
2052
         */
2053
        var expandedTerm = expandedTerms[j],
2054
            posting = this.invertedIndex[expandedTerm],
2055
            termIndex = posting._index
2056
 
2057
        for (var k = 0; k < clause.fields.length; k++) {
2058
          /*
2059
           * For each field that this query term is scoped by (by default
2060
           * all fields are in scope) we need to get all the document refs
2061
           * that have this term in that field.
2062
           *
2063
           * The posting is the entry in the invertedIndex for the matching
2064
           * term from above.
2065
           */
2066
          var field = clause.fields[k],
2067
              fieldPosting = posting[field],
2068
              matchingDocumentRefs = Object.keys(fieldPosting),
2069
              termField = expandedTerm + "/" + field,
2070
              matchingDocumentsSet = new lunr.Set(matchingDocumentRefs)
2071
 
2072
          /*
2073
           * if the presence of this term is required ensure that the matching
2074
           * documents are added to the set of required matches for this clause.
2075
           *
2076
           */
2077
          if (clause.presence == lunr.Query.presence.REQUIRED) {
2078
            clauseMatches = clauseMatches.union(matchingDocumentsSet)
2079
 
2080
            if (requiredMatches[field] === undefined) {
2081
              requiredMatches[field] = lunr.Set.complete
2082
            }
2083
          }
2084
 
2085
          /*
2086
           * if the presence of this term is prohibited ensure that the matching
2087
           * documents are added to the set of prohibited matches for this field,
2088
           * creating that set if it does not yet exist.
2089
           */
2090
          if (clause.presence == lunr.Query.presence.PROHIBITED) {
2091
            if (prohibitedMatches[field] === undefined) {
2092
              prohibitedMatches[field] = lunr.Set.empty
2093
            }
2094
 
2095
            prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet)
2096
 
2097
            /*
2098
             * Prohibited matches should not be part of the query vector used for
2099
             * similarity scoring and no metadata should be extracted so we continue
2100
             * to the next field
2101
             */
2102
            continue
2103
          }
2104
 
2105
          /*
2106
           * The query field vector is populated using the termIndex found for
2107
           * the term and a unit value with the appropriate boost applied.
2108
           * Using upsert because there could already be an entry in the vector
2109
           * for the term we are working with. In that case we just add the scores
2110
           * together.
2111
           */
2112
          queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b })
2113
 
2114
          /**
2115
           * If we've already seen this term, field combo then we've already collected
2116
           * the matching documents and metadata, no need to go through all that again
2117
           */
2118
          if (termFieldCache[termField]) {
2119
            continue
2120
          }
2121
 
2122
          for (var l = 0; l < matchingDocumentRefs.length; l++) {
2123
            /*
2124
             * All metadata for this term/field/document triple
2125
             * are then extracted and collected into an instance
2126
             * of lunr.MatchData ready to be returned in the query
2127
             * results
2128
             */
2129
            var matchingDocumentRef = matchingDocumentRefs[l],
2130
                matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),
2131
                metadata = fieldPosting[matchingDocumentRef],
2132
                fieldMatch
2133
 
2134
            if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) {
2135
              matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata)
2136
            } else {
2137
              fieldMatch.add(expandedTerm, field, metadata)
2138
            }
2139
 
2140
          }
2141
 
2142
          termFieldCache[termField] = true
2143
        }
2144
      }
2145
    }
2146
 
2147
    /**
2148
     * If the presence was required we need to update the requiredMatches field sets.
2149
     * We do this after all fields for the term have collected their matches because
2150
     * the clause terms presence is required in _any_ of the fields not _all_ of the
2151
     * fields.
2152
     */
2153
    if (clause.presence === lunr.Query.presence.REQUIRED) {
2154
      for (var k = 0; k < clause.fields.length; k++) {
2155
        var field = clause.fields[k]
2156
        requiredMatches[field] = requiredMatches[field].intersect(clauseMatches)
2157
      }
2158
    }
2159
  }
2160
 
2161
  /**
2162
   * Need to combine the field scoped required and prohibited
2163
   * matching documents into a global set of required and prohibited
2164
   * matches
2165
   */
2166
  var allRequiredMatches = lunr.Set.complete,
2167
      allProhibitedMatches = lunr.Set.empty
2168
 
2169
  for (var i = 0; i < this.fields.length; i++) {
2170
    var field = this.fields[i]
2171
 
2172
    if (requiredMatches[field]) {
2173
      allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field])
2174
    }
2175
 
2176
    if (prohibitedMatches[field]) {
2177
      allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field])
2178
    }
2179
  }
2180
 
2181
  var matchingFieldRefs = Object.keys(matchingFields),
2182
      results = [],
2183
      matches = Object.create(null)
2184
 
2185
  /*
2186
   * If the query is negated (contains only prohibited terms)
2187
   * we need to get _all_ fieldRefs currently existing in the
2188
   * index. This is only done when we know that the query is
2189
   * entirely prohibited terms to avoid any cost of getting all
2190
   * fieldRefs unnecessarily.
2191
   *
2192
   * Additionally, blank MatchData must be created to correctly
2193
   * populate the results.
2194
   */
2195
  if (query.isNegated()) {
2196
    matchingFieldRefs = Object.keys(this.fieldVectors)
2197
 
2198
    for (var i = 0; i < matchingFieldRefs.length; i++) {
2199
      var matchingFieldRef = matchingFieldRefs[i]
2200
      var fieldRef = lunr.FieldRef.fromString(matchingFieldRef)
2201
      matchingFields[matchingFieldRef] = new lunr.MatchData
2202
    }
2203
  }
2204
 
2205
  for (var i = 0; i < matchingFieldRefs.length; i++) {
2206
    /*
2207
     * Currently we have document fields that match the query, but we
2208
     * need to return documents. The matchData and scores are combined
2209
     * from multiple fields belonging to the same document.
2210
     *
2211
     * Scores are calculated by field, using the query vectors created
2212
     * above, and combined into a final document score using addition.
2213
     */
2214
    var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
2215
        docRef = fieldRef.docRef
2216
 
2217
    if (!allRequiredMatches.contains(docRef)) {
2218
      continue
2219
    }
2220
 
2221
    if (allProhibitedMatches.contains(docRef)) {
2222
      continue
2223
    }
2224
 
2225
    var fieldVector = this.fieldVectors[fieldRef],
2226
        score = queryVectors[fieldRef.fieldName].similarity(fieldVector),
2227
        docMatch
2228
 
2229
    if ((docMatch = matches[docRef]) !== undefined) {
2230
      docMatch.score += score
2231
      docMatch.matchData.combine(matchingFields[fieldRef])
2232
    } else {
2233
      var match = {
2234
        ref: docRef,
2235
        score: score,
2236
        matchData: matchingFields[fieldRef]
2237
      }
2238
      matches[docRef] = match
2239
      results.push(match)
2240
    }
2241
  }
2242
 
2243
  /*
2244
   * Sort the results objects by score, highest first.
2245
   */
2246
  return results.sort(function (a, b) {
2247
    return b.score - a.score
2248
  })
2249
}
2250
 
2251
/**
 * Prepares the index for JSON serialization.
 *
 * The schema for this JSON blob will be described in a
 * separate JSON schema file.
 *
 * @returns {Object}
 */
lunr.Index.prototype.toJSON = function () {
  var self = this

  // Serialise the inverted index as [term, posting] tuples, sorted by
  // term so the output is deterministic.
  var invertedIndex = Object.keys(this.invertedIndex).sort().map(function (term) {
    return [term, self.invertedIndex[term]]
  })

  // Serialise each field vector as a [fieldRef, elements] tuple.
  var fieldVectors = Object.keys(this.fieldVectors).map(function (ref) {
    return [ref, self.fieldVectors[ref].toJSON()]
  })

  return {
    version: lunr.version,
    fields: this.fields,
    fieldVectors: fieldVectors,
    invertedIndex: invertedIndex,
    pipeline: this.pipeline.toJSON()
  }
}
2279
 
2280
/**
 * Loads a previously serialized lunr.Index
 *
 * @param {Object} serializedIndex - A previously serialized lunr.Index
 * @returns {lunr.Index}
 */
lunr.Index.load = function (serializedIndex) {
  var pipeline = lunr.Pipeline.load(serializedIndex.pipeline),
      tokenSetBuilder = new lunr.TokenSet.Builder,
      fieldVectors = {},
      invertedIndex = Object.create(null)

  // A version mismatch is not fatal, but results may differ from the
  // version of lunr that produced the serialised index, so warn.
  if (serializedIndex.version != lunr.version) {
    lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
  }

  // Rebuild each field vector from its [ref, elements] tuple.
  var serializedVectors = serializedIndex.fieldVectors
  for (var i = 0; i < serializedVectors.length; i++) {
    var vectorTuple = serializedVectors[i]
    fieldVectors[vectorTuple[0]] = new lunr.Vector(vectorTuple[1])
  }

  // Rebuild the inverted index and, in the same pass, feed every term
  // into the token set builder.
  var serializedInvertedIndex = serializedIndex.invertedIndex
  for (var j = 0; j < serializedInvertedIndex.length; j++) {
    var indexTuple = serializedInvertedIndex[j],
        term = indexTuple[0]

    tokenSetBuilder.insert(term)
    invertedIndex[term] = indexTuple[1]
  }

  tokenSetBuilder.finish()

  var attrs = {
    fields: serializedIndex.fields,
    fieldVectors: fieldVectors,
    invertedIndex: invertedIndex,
    tokenSet: tokenSetBuilder.root,
    pipeline: pipeline
  }

  return new lunr.Index(attrs)
}
2327
/*!
2328
 * lunr.Builder
2329
 * Copyright (C) 2020 Oliver Nightingale
2330
 */
2331
 
2332
/**
2333
 * lunr.Builder performs indexing on a set of documents and
2334
 * returns instances of lunr.Index ready for querying.
2335
 *
2336
 * All configuration of the index is done via the builder, the
2337
 * fields to index, the document reference, the text processing
2338
 * pipeline and document scoring parameters are all set on the
2339
 * builder before indexing.
2340
 *
2341
 * @constructor
2342
 * @property {string} _ref - Internal reference to the document reference field.
2343
 * @property {string[]} _fields - Internal reference to the document fields to index.
2344
 * @property {object} invertedIndex - The inverted index maps terms to document fields.
2345
 * @property {object} documentTermFrequencies - Keeps track of document term frequencies.
2346
 * @property {object} documentLengths - Keeps track of the length of documents added to the index.
2347
 * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
2348
 * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
2349
 * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
2350
 * @property {number} documentCount - Keeps track of the total number of documents indexed.
2351
 * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75.
2352
 * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.
2353
 * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space.
2354
 * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
2355
 */
2356
lunr.Builder = function () {
2357
  this._ref = "id"
2358
  this._fields = Object.create(null)
2359
  this._documents = Object.create(null)
2360
  this.invertedIndex = Object.create(null)
2361
  this.fieldTermFrequencies = {}
2362
  this.fieldLengths = {}
2363
  this.tokenizer = lunr.tokenizer
2364
  this.pipeline = new lunr.Pipeline
2365
  this.searchPipeline = new lunr.Pipeline
2366
  this.documentCount = 0
2367
  this._b = 0.75
2368
  this._k1 = 1.2
2369
  this.termIndex = 0
2370
  this.metadataWhitelist = []
2371
}
2372
 
2373
/**
2374
 * Sets the document field used as the document reference. Every document must have this field.
2375
 * The type of this field in the document should be a string, if it is not a string it will be
2376
 * coerced into a string by calling toString.
2377
 *
2378
 * The default ref is 'id'.
2379
 *
2380
 * The ref should _not_ be changed during indexing, it should be set before any documents are
2381
 * added to the index. Changing it during indexing can lead to inconsistent results.
2382
 *
2383
 * @param {string} ref - The name of the reference field in the document.
2384
 */
2385
lunr.Builder.prototype.ref = function (ref) {
2386
  this._ref = ref
2387
}
2388
 
2389
/**
2390
 * A function that is used to extract a field from a document.
2391
 *
2392
 * Lunr expects a field to be at the top level of a document, if however the field
2393
 * is deeply nested within a document an extractor function can be used to extract
2394
 * the right field for indexing.
2395
 *
2396
 * @callback fieldExtractor
2397
 * @param {object} doc - The document being added to the index.
2398
 * @returns {?(string|object|object[])} obj - The object that will be indexed for this field.
2399
 * @example <caption>Extracting a nested field</caption>
2400
 * function (doc) { return doc.nested.field }
2401
 */
2402
 
2403
/**
2404
 * Adds a field to the list of document fields that will be indexed. Every document being
2405
 * indexed should have this field. Null values for this field in indexed documents will
2406
 * not cause errors but will limit the chance of that document being retrieved by searches.
2407
 *
2408
 * All fields should be added before adding documents to the index. Adding fields after
2409
 * a document has been indexed will have no effect on already indexed documents.
2410
 *
2411
 * Fields can be boosted at build time. This allows terms within that field to have more
2412
 * importance when ranking search results. Use a field boost to specify that matches within
2413
 * one field are more important than other fields.
2414
 *
2415
 * @param {string} fieldName - The name of a field to index in all documents.
2416
 * @param {object} attributes - Optional attributes associated with this field.
2417
 * @param {number} [attributes.boost=1] - Boost applied to all terms within this field.
2418
 * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document.
2419
 * @throws {RangeError} fieldName cannot contain unsupported characters '/'
2420
 */
2421
lunr.Builder.prototype.field = function (fieldName, attributes) {
2422
  if (/\//.test(fieldName)) {
2423
    throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'")
2424
  }
2425
 
2426
  this._fields[fieldName] = attributes || {}
2427
}
2428
 
2429
/**
 * A parameter to tune the amount of field length normalisation that is applied when
 * calculating relevance scores. A value of 0 will completely disable any normalisation
 * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
 * will be clamped to the range 0 - 1.
 *
 * @param {number} number - The value to set for this tuning parameter.
 */
lunr.Builder.prototype.b = function (number) {
  // Clamp the value into the valid 0 - 1 range.
  this._b = Math.min(Math.max(number, 0), 1)
}
2446
 
2447
/**
2448
 * A parameter that controls the speed at which a rise in term frequency results in term
2449
 * frequency saturation. The default value is 1.2. Setting this to a higher value will give
2450
 * slower saturation levels, a lower value will result in quicker saturation.
2451
 *
2452
 * @param {number} number - The value to set for this tuning parameter.
2453
 */
2454
lunr.Builder.prototype.k1 = function (number) {
2455
  this._k1 = number
2456
}
2457
 
2458
/**
 * Adds a document to the index.
 *
 * Before adding fields to the index the index should have been fully setup, with the document
 * ref and all fields to index already having been specified.
 *
 * The document must have a field name as specified by the ref (by default this is 'id') and
 * it should have all fields defined for indexing, though null or undefined values will not
 * cause errors.
 *
 * Entire documents can be boosted at build time. Applying a boost to a document indicates that
 * this document should rank higher in search results than other documents.
 *
 * @param {object} doc - The document to add to the index.
 * @param {object} attributes - Optional attributes associated with this document.
 * @param {number} [attributes.boost=1] - Boost applied to all terms within this document.
 */
lunr.Builder.prototype.add = function (doc, attributes) {
  var docRef = doc[this._ref],
      fields = Object.keys(this._fields)

  this._documents[docRef] = attributes || {}
  this.documentCount += 1

  for (var i = 0; i < fields.length; i++) {
    var fieldName = fields[i],
        extractor = this._fields[fieldName].extractor,
        field = extractor ? extractor(doc) : doc[fieldName],
        tokens = this.tokenizer(field, {
          fields: [fieldName]
        }),
        terms = this.pipeline.run(tokens),
        fieldRef = new lunr.FieldRef (docRef, fieldName),
        fieldTerms = Object.create(null)

    this.fieldTermFrequencies[fieldRef] = fieldTerms

    // store the length of this field for this document
    this.fieldLengths[fieldRef] = terms.length

    // calculate term frequencies for this field
    for (var j = 0; j < terms.length; j++) {
      var term = terms[j]

      if (fieldTerms[term] == undefined) {
        fieldTerms[term] = 0
      }

      fieldTerms[term] += 1

      // add to inverted index
      // create an initial posting if one doesn't exist
      var posting = this.invertedIndex[term]

      if (posting == undefined) {
        posting = Object.create(null)
        posting["_index"] = this.termIndex
        this.termIndex += 1

        // every field gets an (initially empty) docRef map in the posting
        for (var k = 0; k < fields.length; k++) {
          posting[fields[k]] = Object.create(null)
        }

        this.invertedIndex[term] = posting
      }

      // add an entry for this term/fieldName/docRef to the invertedIndex
      var termEntry = posting[fieldName][docRef]

      if (termEntry == undefined) {
        termEntry = Object.create(null)
        posting[fieldName][docRef] = termEntry
      }

      // store all whitelisted metadata about this token in the
      // inverted index
      for (var l = 0; l < this.metadataWhitelist.length; l++) {
        var metadataKey = this.metadataWhitelist[l],
            metadata = term.metadata[metadataKey]

        if (termEntry[metadataKey] == undefined) {
          termEntry[metadataKey] = []
        }

        termEntry[metadataKey].push(metadata)
      }
    }

  }
}
2544
 
2545
/**
 * Calculates the average document length for this index
 *
 * @private
 */
lunr.Builder.prototype.calculateAverageFieldLengths = function () {
  var accumulator = {},
      documentsWithField = {},
      fieldRefs = Object.keys(this.fieldLengths)

  // Sum the lengths of every (document, field) pair and count how many
  // documents actually contain each field.
  for (var i = 0; i < fieldRefs.length; i++) {
    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
        fieldName = fieldRef.fieldName

    if (documentsWithField[fieldName] === undefined) {
      documentsWithField[fieldName] = 0
      accumulator[fieldName] = 0
    }

    documentsWithField[fieldName] += 1
    accumulator[fieldName] += this.fieldLengths[fieldRef]
  }

  // Convert each total into a per-document average.
  var fields = Object.keys(this._fields)

  for (var j = 0; j < fields.length; j++) {
    var fieldName = fields[j]
    accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName]
  }

  this.averageFieldLength = accumulator
}
2577
 
2578
/**
 * Builds a vector space model of every document using lunr.Vector
 *
 * @private
 */
lunr.Builder.prototype.createFieldVectors = function () {
  var fieldVectors = {},
      fieldRefs = Object.keys(this.fieldTermFrequencies),
      termIdfCache = Object.create(null)

  for (var i = 0; i < fieldRefs.length; i++) {
    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
        fieldName = fieldRef.fieldName,
        fieldLength = this.fieldLengths[fieldRef],
        fieldVector = new lunr.Vector,
        termFrequencies = this.fieldTermFrequencies[fieldRef],
        terms = Object.keys(termFrequencies)

    var fieldBoost = this._fields[fieldName].boost || 1,
        docBoost = this._documents[fieldRef.docRef].boost || 1

    for (var j = 0; j < terms.length; j++) {
      var term = terms[j],
          tf = termFrequencies[term],
          termIndex = this.invertedIndex[term]._index,
          idf = termIdfCache[term]

      // idf only depends on the term, so compute it once per term and
      // cache it across all field vectors.
      if (idf === undefined) {
        idf = lunr.idf(this.invertedIndex[term], this.documentCount)
        termIdfCache[term] = idf
      }

      // BM25 term weight with field-length normalisation (b) and term
      // frequency saturation (k1), scaled by any field and document boosts.
      var score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf)
      score *= fieldBoost
      score *= docBoost

      // Converts 1.23456789 to 1.234.
      // Reducing the precision so that the vectors take up less
      // space when serialised. Doing it now so that they behave
      // the same before and after serialisation. Also, this is
      // the fastest approach to reducing a number's precision in
      // JavaScript.
      var scoreWithPrecision = Math.round(score * 1000) / 1000

      fieldVector.insert(termIndex, scoreWithPrecision)
    }

    fieldVectors[fieldRef] = fieldVector
  }

  this.fieldVectors = fieldVectors
}
2634
 
2635
/**
 * Creates a token set of all tokens in the index using lunr.TokenSet
 *
 * @private
 */
lunr.Builder.prototype.createTokenSet = function () {
  // TokenSet.fromArray requires its input to be sorted.
  var sortedTerms = Object.keys(this.invertedIndex).sort()
  this.tokenSet = lunr.TokenSet.fromArray(sortedTerms)
}
2645
 
2646
/**
 * Builds the index, creating an instance of lunr.Index.
 *
 * This completes the indexing process and should only be called
 * once all documents have been added to the index.
 *
 * @returns {lunr.Index}
 */
lunr.Builder.prototype.build = function () {
  this.calculateAverageFieldLengths()
  this.createFieldVectors()
  this.createTokenSet()

  // The built index only needs the search pipeline; the indexing
  // pipeline has already done its work.
  var attrs = {
    invertedIndex: this.invertedIndex,
    fieldVectors: this.fieldVectors,
    tokenSet: this.tokenSet,
    fields: Object.keys(this._fields),
    pipeline: this.searchPipeline
  }

  return new lunr.Index(attrs)
}
2667
 
2668
/**
 * Applies a plugin to the index builder.
 *
 * A plugin is a function that encapsulates custom behaviour that should be
 * applied when building the index. It is invoked with the builder as both
 * its context (this) and its first argument; any extra arguments passed to
 * use are forwarded to the plugin after the builder.
 *
 * @param {Function} plugin The plugin to apply.
 */
lunr.Builder.prototype.use = function (plugin) {
  // Build the argument list as [builder, ...extraArgs].
  var args = [this]

  for (var i = 1; i < arguments.length; i++) {
    args.push(arguments[i])
  }

  plugin.apply(this, args)
}
2687
/**
 * Contains and collects metadata about a matching document.
 * A single instance of lunr.MatchData is returned as part of every
 * lunr.Index~Result.
 *
 * @constructor
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 * @property {object} metadata - A cloned collection of metadata associated with this document.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData = function (term, field, metadata) {
  // Clone the metadata to prevent the original being mutated during
  // match data combination. Metadata is kept in arrays within the
  // inverted index, so Array#slice is enough for the clone.
  var clonedMetadata = Object.create(null)

  Object.keys(metadata || {}).forEach(function (key) {
    clonedMetadata[key] = metadata[key].slice()
  })

  this.metadata = Object.create(null)

  // Allow a completely empty MatchData to be constructed (used for
  // negated queries) by passing no term at all.
  if (term !== undefined) {
    this.metadata[term] = Object.create(null)
    this.metadata[term][field] = clonedMetadata
  }
}
2720
 
2721
/**
 * An instance of lunr.MatchData will be created for every term that matches a
 * document. However only one instance is required in a lunr.Index~Result. This
 * method combines metadata from another instance of lunr.MatchData with this
 * objects metadata.
 *
 * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData.prototype.combine = function (otherMatchData) {
  var terms = Object.keys(otherMatchData.metadata)

  for (var i = 0; i < terms.length; i++) {
    var term = terms[i],
        otherFields = otherMatchData.metadata[term],
        fields = Object.keys(otherFields)

    var thisTerm = this.metadata[term]

    if (thisTerm == undefined) {
      thisTerm = Object.create(null)
      this.metadata[term] = thisTerm
    }

    for (var j = 0; j < fields.length; j++) {
      var field = fields[j],
          otherKeys = otherFields[field],
          keys = Object.keys(otherKeys)

      var thisField = thisTerm[field]

      if (thisField == undefined) {
        thisField = Object.create(null)
        thisTerm[field] = thisField
      }

      for (var k = 0; k < keys.length; k++) {
        var key = keys[k]

        // Adopt the other side's array when this key is new, otherwise
        // concatenate the two metadata arrays.
        if (thisField[key] == undefined) {
          thisField[key] = otherKeys[key]
        } else {
          thisField[key] = thisField[key].concat(otherKeys[key])
        }
      }
    }
  }
}
2762
 
2763
/**
 * Add metadata for a term/field pair to this instance of match data.
 *
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 */
lunr.MatchData.prototype.add = function (term, field, metadata) {
  // Unseen term: store the metadata reference directly under a fresh
  // field map and we are done.
  if (!(term in this.metadata)) {
    var termMetadata = Object.create(null)
    termMetadata[field] = metadata
    this.metadata[term] = termMetadata
    return
  }

  // Known term but unseen field: again just store the reference.
  if (!(field in this.metadata[term])) {
    this.metadata[term][field] = metadata
    return
  }

  // Both term and field already exist: merge key by key, concatenating
  // onto any existing metadata arrays.
  var fieldMetadata = this.metadata[term][field],
      metadataKeys = Object.keys(metadata)

  for (var i = 0; i < metadataKeys.length; i++) {
    var key = metadataKeys[i]

    if (key in fieldMetadata) {
      fieldMetadata[key] = fieldMetadata[key].concat(metadata[key])
    } else {
      fieldMetadata[key] = metadata[key]
    }
  }
}
2794
/**
2795
 * A lunr.Query provides a programmatic way of defining queries to be performed
2796
 * against a {@link lunr.Index}.
2797
 *
2798
 * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
2799
 * so the query object is pre-initialized with the right index fields.
2800
 *
2801
 * @constructor
2802
 * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
2803
 * @property {string[]} allFields - An array of all available fields in a lunr.Index.
2804
 */
2805
lunr.Query = function (allFields) {
2806
  this.clauses = []
2807
  this.allFields = allFields
2808
}
2809
 
2810
/**
2811
 * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
2812
 *
2813
 * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
2814
 * concatenation.
2815
 *
2816
 * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
2817
 *
2818
 * @constant
2819
 * @default
2820
 * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
2821
 * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
2822
 * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
2823
 * @see lunr.Query~Clause
2824
 * @see lunr.Query#clause
2825
 * @see lunr.Query#term
2826
 * @example <caption>query term with trailing wildcard</caption>
2827
 * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
2828
 * @example <caption>query term with leading and trailing wildcard</caption>
2829
 * query.term('foo', {
2830
 *   wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
2831
 * })
2832
 */
2833
 
2834
lunr.Query.wildcard = new String ("*")
2835
lunr.Query.wildcard.NONE = 0
2836
lunr.Query.wildcard.LEADING = 1
2837
lunr.Query.wildcard.TRAILING = 2
2838
 
2839
/**
2840
 * Constants for indicating what kind of presence a term must have in matching documents.
2841
 *
2842
 * @constant
2843
 * @enum {number}
2844
 * @see lunr.Query~Clause
2845
 * @see lunr.Query#clause
2846
 * @see lunr.Query#term
2847
 * @example <caption>query term with required presence</caption>
2848
 * query.term('foo', { presence: lunr.Query.presence.REQUIRED })
2849
 */
2850
lunr.Query.presence = {
2851
  /**
2852
   * Term's presence in a document is optional, this is the default value.
2853
   */
2854
  OPTIONAL: 1,
2855
 
2856
  /**
2857
   * Term's presence in a document is required, documents that do not contain
2858
   * this term will not be returned.
2859
   */
2860
  REQUIRED: 2,
2861
 
2862
  /**
2863
   * Term's presence in a document is prohibited, documents that do contain
2864
   * this term will not be returned.
2865
   */
2866
  PROHIBITED: 3
2867
}
2868
 
2869
/**
 * A single clause in a {@link lunr.Query} contains a term and details on how to
 * match that term against a {@link lunr.Index}.
 *
 * @typedef {Object} lunr.Query~Clause
 * @property {string[]} fields - The fields in an index this clause should be matched against.
 * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
 * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
 * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
 * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.
 * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents.
 */

/**
2883
 * Adds a {@link lunr.Query~Clause} to this query.
2884
 *
2885
 * Unless the clause contains the fields to be matched all fields will be matched. In addition
2886
 * a default boost of 1 is applied to the clause.
2887
 *
2888
 * @param {lunr.Query~Clause} clause - The clause to add to this query.
2889
 * @see lunr.Query~Clause
2890
 * @returns {lunr.Query}
2891
 */
2892
lunr.Query.prototype.clause = function (clause) {
2893
  if (!('fields' in clause)) {
2894
    clause.fields = this.allFields
2895
  }
2896
 
2897
  if (!('boost' in clause)) {
2898
    clause.boost = 1
2899
  }
2900
 
2901
  if (!('usePipeline' in clause)) {
2902
    clause.usePipeline = true
2903
  }
2904
 
2905
  if (!('wildcard' in clause)) {
2906
    clause.wildcard = lunr.Query.wildcard.NONE
2907
  }
2908
 
2909
  if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {
2910
    clause.term = "*" + clause.term
2911
  }
2912
 
2913
  if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {
2914
    clause.term = "" + clause.term + "*"
2915
  }
2916
 
2917
  if (!('presence' in clause)) {
2918
    clause.presence = lunr.Query.presence.OPTIONAL
2919
  }
2920
 
2921
  this.clauses.push(clause)
2922
 
2923
  return this
2924
}
2925
 
2926
/**
2927
 * A negated query is one in which every clause has a presence of
2928
 * prohibited. These queries require some special processing to return
2929
 * the expected results.
2930
 *
2931
 * @returns boolean
2932
 */
2933
lunr.Query.prototype.isNegated = function () {
2934
  for (var i = 0; i < this.clauses.length; i++) {
2935
    if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) {
2936
      return false
2937
    }
2938
  }
2939
 
2940
  return true
2941
}
2942
 
2943
/**
2944
 * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause}
2945
 * to the list of clauses that make up this query.
2946
 *
2947
 * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion
2948
 * to a token or token-like string should be done before calling this method.
2949
 *
2950
 * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an
2951
 * array, each term in the array will share the same options.
2952
 *
2953
 * @param {object|object[]} term - The term(s) to add to the query.
2954
 * @param {object} [options] - Any additional properties to add to the query clause.
2955
 * @returns {lunr.Query}
2956
 * @see lunr.Query#clause
2957
 * @see lunr.Query~Clause
2958
 * @example <caption>adding a single term to a query</caption>
2959
 * query.term("foo")
2960
 * @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption>
2961
 * query.term("foo", {
2962
 *   fields: ["title"],
2963
 *   boost: 10,
2964
 *   wildcard: lunr.Query.wildcard.TRAILING
2965
 * })
2966
 * @example <caption>using lunr.tokenizer to convert a string to tokens before using them as terms</caption>
2967
 * query.term(lunr.tokenizer("foo bar"))
2968
 */
2969
lunr.Query.prototype.term = function (term, options) {
2970
  if (Array.isArray(term)) {
2971
    term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this)
2972
    return this
2973
  }
2974
 
2975
  var clause = options || {}
2976
  clause.term = term.toString()
2977
 
2978
  this.clause(clause)
2979
 
2980
  return this
2981
}
2982
lunr.QueryParseError = function (message, start, end) {
2983
  this.name = "QueryParseError"
2984
  this.message = message
2985
  this.start = start
2986
  this.end = end
2987
}
2988
 
2989
lunr.QueryParseError.prototype = new Error
2990
lunr.QueryLexer = function (str) {
2991
  this.lexemes = []
2992
  this.str = str
2993
  this.length = str.length
2994
  this.pos = 0
2995
  this.start = 0
2996
  this.escapeCharPositions = []
2997
}
2998
 
2999
lunr.QueryLexer.prototype.run = function () {
3000
  var state = lunr.QueryLexer.lexText
3001
 
3002
  while (state) {
3003
    state = state(this)
3004
  }
3005
}
3006
 
3007
lunr.QueryLexer.prototype.sliceString = function () {
3008
  var subSlices = [],
3009
      sliceStart = this.start,
3010
      sliceEnd = this.pos
3011
 
3012
  for (var i = 0; i < this.escapeCharPositions.length; i++) {
3013
    sliceEnd = this.escapeCharPositions[i]
3014
    subSlices.push(this.str.slice(sliceStart, sliceEnd))
3015
    sliceStart = sliceEnd + 1
3016
  }
3017
 
3018
  subSlices.push(this.str.slice(sliceStart, this.pos))
3019
  this.escapeCharPositions.length = 0
3020
 
3021
  return subSlices.join('')
3022
}
3023
 
3024
lunr.QueryLexer.prototype.emit = function (type) {
3025
  this.lexemes.push({
3026
    type: type,
3027
    str: this.sliceString(),
3028
    start: this.start,
3029
    end: this.pos
3030
  })
3031
 
3032
  this.start = this.pos
3033
}
3034
 
3035
lunr.QueryLexer.prototype.escapeCharacter = function () {
3036
  this.escapeCharPositions.push(this.pos - 1)
3037
  this.pos += 1
3038
}
3039
 
3040
lunr.QueryLexer.prototype.next = function () {
3041
  if (this.pos >= this.length) {
3042
    return lunr.QueryLexer.EOS
3043
  }
3044
 
3045
  var char = this.str.charAt(this.pos)
3046
  this.pos += 1
3047
  return char
3048
}
3049
 
3050
lunr.QueryLexer.prototype.width = function () {
3051
  return this.pos - this.start
3052
}
3053
 
3054
lunr.QueryLexer.prototype.ignore = function () {
3055
  if (this.start == this.pos) {
3056
    this.pos += 1
3057
  }
3058
 
3059
  this.start = this.pos
3060
}
3061
 
3062
lunr.QueryLexer.prototype.backup = function () {
3063
  this.pos -= 1
3064
}
3065
 
3066
lunr.QueryLexer.prototype.acceptDigitRun = function () {
3067
  var char, charCode
3068
 
3069
  do {
3070
    char = this.next()
3071
    charCode = char.charCodeAt(0)
3072
  } while (charCode > 47 && charCode < 58)
3073
 
3074
  if (char != lunr.QueryLexer.EOS) {
3075
    this.backup()
3076
  }
3077
}
3078
 
3079
lunr.QueryLexer.prototype.more = function () {
3080
  return this.pos < this.length
3081
}
3082
 
3083
lunr.QueryLexer.EOS = 'EOS'
3084
lunr.QueryLexer.FIELD = 'FIELD'
3085
lunr.QueryLexer.TERM = 'TERM'
3086
lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'
3087
lunr.QueryLexer.BOOST = 'BOOST'
3088
lunr.QueryLexer.PRESENCE = 'PRESENCE'
3089
 
3090
lunr.QueryLexer.lexField = function (lexer) {
3091
  lexer.backup()
3092
  lexer.emit(lunr.QueryLexer.FIELD)
3093
  lexer.ignore()
3094
  return lunr.QueryLexer.lexText
3095
}
3096
 
3097
lunr.QueryLexer.lexTerm = function (lexer) {
3098
  if (lexer.width() > 1) {
3099
    lexer.backup()
3100
    lexer.emit(lunr.QueryLexer.TERM)
3101
  }
3102
 
3103
  lexer.ignore()
3104
 
3105
  if (lexer.more()) {
3106
    return lunr.QueryLexer.lexText
3107
  }
3108
}
3109
 
3110
lunr.QueryLexer.lexEditDistance = function (lexer) {
3111
  lexer.ignore()
3112
  lexer.acceptDigitRun()
3113
  lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
3114
  return lunr.QueryLexer.lexText
3115
}
3116
 
3117
lunr.QueryLexer.lexBoost = function (lexer) {
3118
  lexer.ignore()
3119
  lexer.acceptDigitRun()
3120
  lexer.emit(lunr.QueryLexer.BOOST)
3121
  return lunr.QueryLexer.lexText
3122
}
3123
 
3124
lunr.QueryLexer.lexEOS = function (lexer) {
3125
  if (lexer.width() > 0) {
3126
    lexer.emit(lunr.QueryLexer.TERM)
3127
  }
3128
}
3129
 
3130
// This matches the separator used when tokenising fields
3131
// within a document. These should match otherwise it is
3132
// not possible to search for some tokens within a document.
3133
//
3134
// It is possible for the user to change the separator on the
3135
// tokenizer so it _might_ clash with any other of the special
3136
// characters already used within the search string, e.g. :.
3137
//
3138
// This means that it is possible to change the separator in
3139
// such a way that makes some words unsearchable using a search
3140
// string.
3141
lunr.QueryLexer.termSeparator = lunr.tokenizer.separator
3142
 
3143
lunr.QueryLexer.lexText = function (lexer) {
3144
  while (true) {
3145
    var char = lexer.next()
3146
 
3147
    if (char == lunr.QueryLexer.EOS) {
3148
      return lunr.QueryLexer.lexEOS
3149
    }
3150
 
3151
    // Escape character is '\'
3152
    if (char.charCodeAt(0) == 92) {
3153
      lexer.escapeCharacter()
3154
      continue
3155
    }
3156
 
3157
    if (char == ":") {
3158
      return lunr.QueryLexer.lexField
3159
    }
3160
 
3161
    if (char == "~") {
3162
      lexer.backup()
3163
      if (lexer.width() > 0) {
3164
        lexer.emit(lunr.QueryLexer.TERM)
3165
      }
3166
      return lunr.QueryLexer.lexEditDistance
3167
    }
3168
 
3169
    if (char == "^") {
3170
      lexer.backup()
3171
      if (lexer.width() > 0) {
3172
        lexer.emit(lunr.QueryLexer.TERM)
3173
      }
3174
      return lunr.QueryLexer.lexBoost
3175
    }
3176
 
3177
    // "+" indicates term presence is required
3178
    // checking for length to ensure that only
3179
    // leading "+" are considered
3180
    if (char == "+" && lexer.width() === 1) {
3181
      lexer.emit(lunr.QueryLexer.PRESENCE)
3182
      return lunr.QueryLexer.lexText
3183
    }
3184
 
3185
    // "-" indicates term presence is prohibited
3186
    // checking for length to ensure that only
3187
    // leading "-" are considered
3188
    if (char == "-" && lexer.width() === 1) {
3189
      lexer.emit(lunr.QueryLexer.PRESENCE)
3190
      return lunr.QueryLexer.lexText
3191
    }
3192
 
3193
    if (char.match(lunr.QueryLexer.termSeparator)) {
3194
      return lunr.QueryLexer.lexTerm
3195
    }
3196
  }
3197
}
3198
 
3199
lunr.QueryParser = function (str, query) {
3200
  this.lexer = new lunr.QueryLexer (str)
3201
  this.query = query
3202
  this.currentClause = {}
3203
  this.lexemeIdx = 0
3204
}
3205
 
3206
lunr.QueryParser.prototype.parse = function () {
3207
  this.lexer.run()
3208
  this.lexemes = this.lexer.lexemes
3209
 
3210
  var state = lunr.QueryParser.parseClause
3211
 
3212
  while (state) {
3213
    state = state(this)
3214
  }
3215
 
3216
  return this.query
3217
}
3218
 
3219
lunr.QueryParser.prototype.peekLexeme = function () {
3220
  return this.lexemes[this.lexemeIdx]
3221
}
3222
 
3223
lunr.QueryParser.prototype.consumeLexeme = function () {
3224
  var lexeme = this.peekLexeme()
3225
  this.lexemeIdx += 1
3226
  return lexeme
3227
}
3228
 
3229
lunr.QueryParser.prototype.nextClause = function () {
3230
  var completedClause = this.currentClause
3231
  this.query.clause(completedClause)
3232
  this.currentClause = {}
3233
}
3234
 
3235
lunr.QueryParser.parseClause = function (parser) {
3236
  var lexeme = parser.peekLexeme()
3237
 
3238
  if (lexeme == undefined) {
3239
    return
3240
  }
3241
 
3242
  switch (lexeme.type) {
3243
    case lunr.QueryLexer.PRESENCE:
3244
      return lunr.QueryParser.parsePresence
3245
    case lunr.QueryLexer.FIELD:
3246
      return lunr.QueryParser.parseField
3247
    case lunr.QueryLexer.TERM:
3248
      return lunr.QueryParser.parseTerm
3249
    default:
3250
      var errorMessage = "expected either a field or a term, found " + lexeme.type
3251
 
3252
      if (lexeme.str.length >= 1) {
3253
        errorMessage += " with value '" + lexeme.str + "'"
3254
      }
3255
 
3256
      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3257
  }
3258
}
3259
 
3260
lunr.QueryParser.parsePresence = function (parser) {
3261
  var lexeme = parser.consumeLexeme()
3262
 
3263
  if (lexeme == undefined) {
3264
    return
3265
  }
3266
 
3267
  switch (lexeme.str) {
3268
    case "-":
3269
      parser.currentClause.presence = lunr.Query.presence.PROHIBITED
3270
      break
3271
    case "+":
3272
      parser.currentClause.presence = lunr.Query.presence.REQUIRED
3273
      break
3274
    default:
3275
      var errorMessage = "unrecognised presence operator'" + lexeme.str + "'"
3276
      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3277
  }
3278
 
3279
  var nextLexeme = parser.peekLexeme()
3280
 
3281
  if (nextLexeme == undefined) {
3282
    var errorMessage = "expecting term or field, found nothing"
3283
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3284
  }
3285
 
3286
  switch (nextLexeme.type) {
3287
    case lunr.QueryLexer.FIELD:
3288
      return lunr.QueryParser.parseField
3289
    case lunr.QueryLexer.TERM:
3290
      return lunr.QueryParser.parseTerm
3291
    default:
3292
      var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'"
3293
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
3294
  }
3295
}
3296
 
3297
lunr.QueryParser.parseField = function (parser) {
3298
  var lexeme = parser.consumeLexeme()
3299
 
3300
  if (lexeme == undefined) {
3301
    return
3302
  }
3303
 
3304
  if (parser.query.allFields.indexOf(lexeme.str) == -1) {
3305
    var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
3306
        errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields
3307
 
3308
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3309
  }
3310
 
3311
  parser.currentClause.fields = [lexeme.str]
3312
 
3313
  var nextLexeme = parser.peekLexeme()
3314
 
3315
  if (nextLexeme == undefined) {
3316
    var errorMessage = "expecting term, found nothing"
3317
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3318
  }
3319
 
3320
  switch (nextLexeme.type) {
3321
    case lunr.QueryLexer.TERM:
3322
      return lunr.QueryParser.parseTerm
3323
    default:
3324
      var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
3325
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
3326
  }
3327
}
3328
 
3329
lunr.QueryParser.parseTerm = function (parser) {
3330
  var lexeme = parser.consumeLexeme()
3331
 
3332
  if (lexeme == undefined) {
3333
    return
3334
  }
3335
 
3336
  parser.currentClause.term = lexeme.str.toLowerCase()
3337
 
3338
  if (lexeme.str.indexOf("*") != -1) {
3339
    parser.currentClause.usePipeline = false
3340
  }
3341
 
3342
  var nextLexeme = parser.peekLexeme()
3343
 
3344
  if (nextLexeme == undefined) {
3345
    parser.nextClause()
3346
    return
3347
  }
3348
 
3349
  switch (nextLexeme.type) {
3350
    case lunr.QueryLexer.TERM:
3351
      parser.nextClause()
3352
      return lunr.QueryParser.parseTerm
3353
    case lunr.QueryLexer.FIELD:
3354
      parser.nextClause()
3355
      return lunr.QueryParser.parseField
3356
    case lunr.QueryLexer.EDIT_DISTANCE:
3357
      return lunr.QueryParser.parseEditDistance
3358
    case lunr.QueryLexer.BOOST:
3359
      return lunr.QueryParser.parseBoost
3360
    case lunr.QueryLexer.PRESENCE:
3361
      parser.nextClause()
3362
      return lunr.QueryParser.parsePresence
3363
    default:
3364
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
3365
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
3366
  }
3367
}
3368
 
3369
lunr.QueryParser.parseEditDistance = function (parser) {
3370
  var lexeme = parser.consumeLexeme()
3371
 
3372
  if (lexeme == undefined) {
3373
    return
3374
  }
3375
 
3376
  var editDistance = parseInt(lexeme.str, 10)
3377
 
3378
  if (isNaN(editDistance)) {
3379
    var errorMessage = "edit distance must be numeric"
3380
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3381
  }
3382
 
3383
  parser.currentClause.editDistance = editDistance
3384
 
3385
  var nextLexeme = parser.peekLexeme()
3386
 
3387
  if (nextLexeme == undefined) {
3388
    parser.nextClause()
3389
    return
3390
  }
3391
 
3392
  switch (nextLexeme.type) {
3393
    case lunr.QueryLexer.TERM:
3394
      parser.nextClause()
3395
      return lunr.QueryParser.parseTerm
3396
    case lunr.QueryLexer.FIELD:
3397
      parser.nextClause()
3398
      return lunr.QueryParser.parseField
3399
    case lunr.QueryLexer.EDIT_DISTANCE:
3400
      return lunr.QueryParser.parseEditDistance
3401
    case lunr.QueryLexer.BOOST:
3402
      return lunr.QueryParser.parseBoost
3403
    case lunr.QueryLexer.PRESENCE:
3404
      parser.nextClause()
3405
      return lunr.QueryParser.parsePresence
3406
    default:
3407
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
3408
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
3409
  }
3410
}
3411
 
3412
lunr.QueryParser.parseBoost = function (parser) {
3413
  var lexeme = parser.consumeLexeme()
3414
 
3415
  if (lexeme == undefined) {
3416
    return
3417
  }
3418
 
3419
  var boost = parseInt(lexeme.str, 10)
3420
 
3421
  if (isNaN(boost)) {
3422
    var errorMessage = "boost must be numeric"
3423
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
3424
  }
3425
 
3426
  parser.currentClause.boost = boost
3427
 
3428
  var nextLexeme = parser.peekLexeme()
3429
 
3430
  if (nextLexeme == undefined) {
3431
    parser.nextClause()
3432
    return
3433
  }
3434
 
3435
  switch (nextLexeme.type) {
3436
    case lunr.QueryLexer.TERM:
3437
      parser.nextClause()
3438
      return lunr.QueryParser.parseTerm
3439
    case lunr.QueryLexer.FIELD:
3440
      parser.nextClause()
3441
      return lunr.QueryParser.parseField
3442
    case lunr.QueryLexer.EDIT_DISTANCE:
3443
      return lunr.QueryParser.parseEditDistance
3444
    case lunr.QueryLexer.BOOST:
3445
      return lunr.QueryParser.parseBoost
3446
    case lunr.QueryLexer.PRESENCE:
3447
      parser.nextClause()
3448
      return lunr.QueryParser.parsePresence
3449
    default:
3450
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
3451
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
3452
  }
3453
}
3454
 
3455
  /**
3456
   * export the module via AMD, CommonJS or as a browser global
3457
   * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
3458
   */
3459
  ;(function (root, factory) {
3460
    if (typeof define === 'function' && define.amd) {
3461
      // AMD. Register as an anonymous module.
3462
      define(factory)
3463
    } else if (typeof exports === 'object') {
3464
      /**
3465
       * Node. Does not work with strict CommonJS, but
3466
       * only CommonJS-like enviroments that support module.exports,
3467
       * like Node.
3468
       */
3469
      module.exports = factory()
3470
    } else {
3471
      // Browser globals (root is window)
3472
      root.lunr = factory()
3473
    }
3474
  }(this, function () {
3475
    /**
3476
     * Just return a value to define the module export.
3477
     * This example returns an object, but the module
3478
     * can return a function as the exported value.
3479
     */
3480
    return lunr
3481
  }))
3482
})();