// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

// <auto-generated/>

#nullable disable

using System;
using System.ComponentModel;

namespace Azure.Search.Documents.Indexes.Models
{
    /// <summary> Defines the names of all token filters supported by Azure Cognitive Search. </summary>
    public readonly partial struct TokenFilterName : IEquatable<TokenFilterName>
    {
        private readonly string _value;

        /// <summary> Initializes a new instance of <see cref="TokenFilterName"/>. </summary>
        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
        public TokenFilterName(string value)
        {
            _value = value ?? throw new ArgumentNullException(nameof(value));
        }

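        // The constants below hold the underlying string value for each known token filter name;
        // each public static property further down wraps one of them in a TokenFilterName.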
        private const string ArabicNormalizationValue = "arabic_normalization";
        private const string ApostropheValue = "apostrophe";
        private const string AsciiFoldingValue = "asciifolding";
        private const string CjkBigramValue = "cjk_bigram";
        private const string CjkWidthValue = "cjk_width";
        private const string ClassicValue = "classic";
        private const string CommonGramValue = "common_grams";
        private const string EdgeNGramValue = "edgeNGram_v2";
        private const string ElisionValue = "elision";
        private const string GermanNormalizationValue = "german_normalization";
        private const string HindiNormalizationValue = "hindi_normalization";
        private const string IndicNormalizationValue = "indic_normalization";
        private const string KeywordRepeatValue = "keyword_repeat";
        private const string KStemValue = "kstem";
        private const string LengthValue = "length";
        private const string LimitValue = "limit";
        private const string LowercaseValue = "lowercase";
        private const string NGramValue = "nGram_v2";
        private const string PersianNormalizationValue = "persian_normalization";
        private const string PhoneticValue = "phonetic";
        private const string PorterStemValue = "porter_stem";
        private const string ReverseValue = "reverse";
        private const string ScandinavianNormalizationValue = "scandinavian_normalization";
        private const string ScandinavianFoldingNormalizationValue = "scandinavian_folding";
        private const string ShingleValue = "shingle";
        private const string SnowballValue = "snowball";
        private const string SoraniNormalizationValue = "sorani_normalization";
        private const string StemmerValue = "stemmer";
        private const string StopwordsValue = "stopwords";
        private const string TrimValue = "trim";
        private const string TruncateValue = "truncate";
        private const string UniqueValue = "unique";
        private const string UppercaseValue = "uppercase";
        private const string WordDelimiterValue = "word_delimiter";

        /// <summary> A token filter that applies the Arabic normalizer to normalize the orthography. </summary>
        public static TokenFilterName ArabicNormalization { get; } = new TokenFilterName(ArabicNormalizationValue);
        /// <summary> Strips all characters after an apostrophe (including the apostrophe itself). </summary>
        public static TokenFilterName Apostrophe { get; } = new TokenFilterName(ApostropheValue);
        /// <summary> Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the &quot;Basic Latin&quot; Unicode block) into their ASCII equivalents, if such equivalents exist. </summary>
        public static TokenFilterName AsciiFolding { get; } = new TokenFilterName(AsciiFoldingValue);
        /// <summary> Forms bigrams of CJK terms that are generated from the standard tokenizer. </summary>
        public static TokenFilterName CjkBigram { get; } = new TokenFilterName(CjkBigramValue);
        /// <summary> Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. </summary>
        public static TokenFilterName CjkWidth { get; } = new TokenFilterName(CjkWidthValue);
        /// <summary> Removes English possessives, and dots from acronyms. </summary>
        public static TokenFilterName Classic { get; } = new TokenFilterName(ClassicValue);
        /// <summary> Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. </summary>
        public static TokenFilterName CommonGram { get; } = new TokenFilterName(CommonGramValue);
        /// <summary> Generates n-grams of the given size(s) starting from the front or the back of an input token. </summary>
        public static TokenFilterName EdgeNGram { get; } = new TokenFilterName(EdgeNGramValue);
        /// <summary> Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). </summary>
        public static TokenFilterName Elision { get; } = new TokenFilterName(ElisionValue);
        /// <summary> Normalizes German characters according to the heuristics of the German2 snowball algorithm. </summary>
        public static TokenFilterName GermanNormalization { get; } = new TokenFilterName(GermanNormalizationValue);
        /// <summary> Normalizes text in Hindi to remove some differences in spelling variations. </summary>
        public static TokenFilterName HindiNormalization { get; } = new TokenFilterName(HindiNormalizationValue);
        /// <summary> Normalizes the Unicode representation of text in Indian languages. </summary>
        public static TokenFilterName IndicNormalization { get; } = new TokenFilterName(IndicNormalizationValue);
        /// <summary> Emits each incoming token twice, once as keyword and once as non-keyword. </summary>
        public static TokenFilterName KeywordRepeat { get; } = new TokenFilterName(KeywordRepeatValue);
        /// <summary> A high-performance kstem filter for English. </summary>
        public static TokenFilterName KStem { get; } = new TokenFilterName(KStemValue);
        /// <summary> Removes words that are too long or too short. </summary>
        public static TokenFilterName Length { get; } = new TokenFilterName(LengthValue);
        /// <summary> Limits the number of tokens while indexing. </summary>
        public static TokenFilterName Limit { get; } = new TokenFilterName(LimitValue);
        /// <summary> Normalizes token text to lower case. </summary>
        public static TokenFilterName Lowercase { get; } = new TokenFilterName(LowercaseValue);
        /// <summary> Generates n-grams of the given size(s). </summary>
        public static TokenFilterName NGram { get; } = new TokenFilterName(NGramValue);
        /// <summary> Applies normalization for Persian. </summary>
        public static TokenFilterName PersianNormalization { get; } = new TokenFilterName(PersianNormalizationValue);
        /// <summary> Create tokens for phonetic matches. </summary>
        public static TokenFilterName Phonetic { get; } = new TokenFilterName(PhoneticValue);
        /// <summary> Uses the Porter stemming algorithm to transform the token stream. </summary>
        public static TokenFilterName PorterStem { get; } = new TokenFilterName(PorterStemValue);
        /// <summary> Reverses the token string. </summary>
        public static TokenFilterName Reverse { get; } = new TokenFilterName(ReverseValue);
        /// <summary> Normalizes use of the interchangeable Scandinavian characters. </summary>
        public static TokenFilterName ScandinavianNormalization { get; } = new TokenFilterName(ScandinavianNormalizationValue);
        /// <summary> Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. </summary>
        public static TokenFilterName ScandinavianFoldingNormalization { get; } = new TokenFilterName(ScandinavianFoldingNormalizationValue);
        /// <summary> Creates combinations of tokens as a single token. </summary>
        public static TokenFilterName Shingle { get; } = new TokenFilterName(ShingleValue);
        /// <summary> A filter that stems words using a Snowball-generated stemmer. </summary>
        public static TokenFilterName Snowball { get; } = new TokenFilterName(SnowballValue);
        /// <summary> Normalizes the Unicode representation of Sorani text. </summary>
        public static TokenFilterName SoraniNormalization { get; } = new TokenFilterName(SoraniNormalizationValue);
        /// <summary> Language specific stemming filter. </summary>
        public static TokenFilterName Stemmer { get; } = new TokenFilterName(StemmerValue);
        /// <summary> Removes stop words from a token stream. </summary>
        public static TokenFilterName Stopwords { get; } = new TokenFilterName(StopwordsValue);
        /// <summary> Trims leading and trailing whitespace from tokens. </summary>
        public static TokenFilterName Trim { get; } = new TokenFilterName(TrimValue);
        /// <summary> Truncates the terms to a specific length. </summary>
        public static TokenFilterName Truncate { get; } = new TokenFilterName(TruncateValue);
        /// <summary> Filters out tokens with same text as the previous token. </summary>
        public static TokenFilterName Unique { get; } = new TokenFilterName(UniqueValue);
        /// <summary> Normalizes token text to upper case. </summary>
        public static TokenFilterName Uppercase { get; } = new TokenFilterName(UppercaseValue);
        /// <summary> Splits words into subwords and performs optional transformations on subword groups. </summary>
        public static TokenFilterName WordDelimiter { get; } = new TokenFilterName(WordDelimiterValue);
        /// <summary> Determines if two <see cref="TokenFilterName"/> values are the same. </summary>
        public static bool operator ==(TokenFilterName left, TokenFilterName right) => left.Equals(right);
        /// <summary> Determines if two <see cref="TokenFilterName"/> values are not the same. </summary>
        public static bool operator !=(TokenFilterName left, TokenFilterName right) => !left.Equals(right);
        /// <summary> Converts a string to a <see cref="TokenFilterName"/>. </summary>
        public static implicit operator TokenFilterName(string value) => new TokenFilterName(value);
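
        // Illustrative note (a sketch, not part of the generated members): the implicit conversion
        // above, together with the equality members below, lets string literals and the predefined
        // values be used interchangeably. For example:
        //
        //   TokenFilterName filter = "lowercase";            // implicit conversion from string
        //   bool same = filter == TokenFilterName.Lowercase; // true: both wrap the value "lowercase"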

        /// <inheritdoc />
        [EditorBrowsable(EditorBrowsableState.Never)]
        public override bool Equals(object obj) => obj is TokenFilterName other && Equals(other);
        /// <inheritdoc />
        public bool Equals(TokenFilterName other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);

        /// <inheritdoc />
        [EditorBrowsable(EditorBrowsableState.Never)]
        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
        /// <inheritdoc />
        public override string ToString() => _value;
    }
}
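
// Usage sketch (illustrative only, not part of the generated file). It assumes the CustomAnalyzer and
// LexicalTokenizerName types from this same namespace; "my_analyzer" and "my_custom_filter" are
// placeholder names. Because TokenFilterName converts implicitly from string, predefined filters and
// custom filter names defined elsewhere in the index can be mixed in the same list:
//
//   var analyzer = new CustomAnalyzer("my_analyzer", LexicalTokenizerName.Standard);
//   analyzer.TokenFilters.Add(TokenFilterName.Lowercase);
//   analyzer.TokenFilters.Add(TokenFilterName.AsciiFolding);
//   analyzer.TokenFilters.Add("my_custom_filter"); // implicit conversion from string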