Summary

Class:Azure.Search.Documents.Indexes.Models.TokenFilterName
Assembly:Azure.Search.Documents
File(s):C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\TokenFilterName.cs
Covered lines:0
Uncovered lines:43
Coverable lines:43
Total lines:147
Line coverage:0% (0 of 43)
Covered branches:0
Total branches:6
Branch coverage:0% (0 of 6)

Metrics

Method | Cyclomatic complexity | Line coverage | Branch coverage
.ctor(...)-0%0%
get_ArabicNormalization()-0%100%
get_Apostrophe()-0%100%
get_AsciiFolding()-0%100%
get_CjkBigram()-0%100%
get_CjkWidth()-0%100%
get_Classic()-0%100%
get_CommonGram()-0%100%
get_EdgeNGram()-0%100%
get_Elision()-0%100%
get_GermanNormalization()-0%100%
get_HindiNormalization()-0%100%
get_IndicNormalization()-0%100%
get_KeywordRepeat()-0%100%
get_KStem()-0%100%
get_Length()-0%100%
get_Limit()-0%100%
get_Lowercase()-0%100%
get_NGram()-0%100%
get_PersianNormalization()-0%100%
get_Phonetic()-0%100%
get_PorterStem()-0%100%
get_Reverse()-0%100%
get_ScandinavianNormalization()-0%100%
get_ScandinavianFoldingNormalization()-0%100%
get_Shingle()-0%100%
get_Snowball()-0%100%
get_SoraniNormalization()-0%100%
get_Stemmer()-0%100%
get_Stopwords()-0%100%
get_Trim()-0%100%
get_Truncate()-0%100%
get_Unique()-0%100%
get_Uppercase()-0%100%
get_WordDelimiter()-0%100%
op_Equality(...)-0%100%
op_Inequality(...)-0%100%
op_Implicit(...)-0%100%
Equals(...)-0%0%
Equals(...)-0%100%
GetHashCode()-0%0%
ToString()-0%100%

File(s)

C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\TokenFilterName.cs

# | Line | Line coverage
 1// Copyright (c) Microsoft Corporation. All rights reserved.
 2// Licensed under the MIT License.
 3
 4// <auto-generated/>
 5
 6#nullable disable
 7
 8using System;
 9using System.ComponentModel;
 10
 11namespace Azure.Search.Documents.Indexes.Models
 12{
 13    /// <summary> Defines the names of all token filters supported by Azure Cognitive Search. </summary>
 14    public readonly partial struct TokenFilterName : IEquatable<TokenFilterName>
 15    {
 16        private readonly string _value;
 17
 18        /// <summary> Determines if two <see cref="TokenFilterName"/> values are the same. </summary>
 19        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
 20        public TokenFilterName(string value)
 21        {
 022            _value = value ?? throw new ArgumentNullException(nameof(value));
 023        }
 24
 25        private const string ArabicNormalizationValue = "arabic_normalization";
 26        private const string ApostropheValue = "apostrophe";
 27        private const string AsciiFoldingValue = "asciifolding";
 28        private const string CjkBigramValue = "cjk_bigram";
 29        private const string CjkWidthValue = "cjk_width";
 30        private const string ClassicValue = "classic";
 31        private const string CommonGramValue = "common_grams";
 32        private const string EdgeNGramValue = "edgeNGram_v2";
 33        private const string ElisionValue = "elision";
 34        private const string GermanNormalizationValue = "german_normalization";
 35        private const string HindiNormalizationValue = "hindi_normalization";
 36        private const string IndicNormalizationValue = "indic_normalization";
 37        private const string KeywordRepeatValue = "keyword_repeat";
 38        private const string KStemValue = "kstem";
 39        private const string LengthValue = "length";
 40        private const string LimitValue = "limit";
 41        private const string LowercaseValue = "lowercase";
 42        private const string NGramValue = "nGram_v2";
 43        private const string PersianNormalizationValue = "persian_normalization";
 44        private const string PhoneticValue = "phonetic";
 45        private const string PorterStemValue = "porter_stem";
 46        private const string ReverseValue = "reverse";
 47        private const string ScandinavianNormalizationValue = "scandinavian_normalization";
 48        private const string ScandinavianFoldingNormalizationValue = "scandinavian_folding";
 49        private const string ShingleValue = "shingle";
 50        private const string SnowballValue = "snowball";
 51        private const string SoraniNormalizationValue = "sorani_normalization";
 52        private const string StemmerValue = "stemmer";
 53        private const string StopwordsValue = "stopwords";
 54        private const string TrimValue = "trim";
 55        private const string TruncateValue = "truncate";
 56        private const string UniqueValue = "unique";
 57        private const string UppercaseValue = "uppercase";
 58        private const string WordDelimiterValue = "word_delimiter";
 59
 60        /// <summary> A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.
 061        public static TokenFilterName ArabicNormalization { get; } = new TokenFilterName(ArabicNormalizationValue);
 62        /// <summary> Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apa
 063        public static TokenFilterName Apostrophe { get; } = new TokenFilterName(ApostropheValue);
 64        /// <summary> Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII
 065        public static TokenFilterName AsciiFolding { get; } = new TokenFilterName(AsciiFoldingValue);
 66        /// <summary> Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apach
 067        public static TokenFilterName CjkBigram { get; } = new TokenFilterName(CjkBigramValue);
 68        /// <summary> Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, 
 069        public static TokenFilterName CjkWidth { get; } = new TokenFilterName(CjkWidthValue);
 70        /// <summary> Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/anal
 071        public static TokenFilterName Classic { get; } = new TokenFilterName(ClassicValue);
 72        /// <summary> Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed to
 073        public static TokenFilterName CommonGram { get; } = new TokenFilterName(CommonGramValue);
 74        /// <summary> Generates n-grams of the given size(s) starting from the front or the back of an input token. See 
 075        public static TokenFilterName EdgeNGram { get; } = new TokenFilterName(EdgeNGramValue);
 76        /// <summary> Removes elisions. For example, &quot;l&apos;avion&quot; (the plane) will be converted to &quot;avi
 077        public static TokenFilterName Elision { get; } = new TokenFilterName(ElisionValue);
 78        /// <summary> Normalizes German characters according to the heuristics of the German2 snowball algorithm. See ht
 079        public static TokenFilterName GermanNormalization { get; } = new TokenFilterName(GermanNormalizationValue);
 80        /// <summary> Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apac
 081        public static TokenFilterName HindiNormalization { get; } = new TokenFilterName(HindiNormalizationValue);
 82        /// <summary> Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/co
 083        public static TokenFilterName IndicNormalization { get; } = new TokenFilterName(IndicNormalizationValue);
 84        /// <summary> Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache
 085        public static TokenFilterName KeywordRepeat { get; } = new TokenFilterName(KeywordRepeatValue);
 86        /// <summary> A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-co
 087        public static TokenFilterName KStem { get; } = new TokenFilterName(KStemValue);
 88        /// <summary> Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-c
 089        public static TokenFilterName Length { get; } = new TokenFilterName(LengthValue);
 90        /// <summary> Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-com
 091        public static TokenFilterName Limit { get; } = new TokenFilterName(LimitValue);
 92        /// <summary> Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org
 093        public static TokenFilterName Lowercase { get; } = new TokenFilterName(LowercaseValue);
 94        /// <summary> Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/
 095        public static TokenFilterName NGram { get; } = new TokenFilterName(NGramValue);
 96        /// <summary> Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/a
 097        public static TokenFilterName PersianNormalization { get; } = new TokenFilterName(PersianNormalizationValue);
 98        /// <summary> Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/o
 099        public static TokenFilterName Phonetic { get; } = new TokenFilterName(PhoneticValue);
 100        /// <summary> Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/
 0101        public static TokenFilterName PorterStem { get; } = new TokenFilterName(PorterStemValue);
 102        /// <summary> Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lu
 0103        public static TokenFilterName Reverse { get; } = new TokenFilterName(ReverseValue);
 104        /// <summary> Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4
 0105        public static TokenFilterName ScandinavianNormalization { get; } = new TokenFilterName(ScandinavianNormalization
 106        /// <summary> Folds Scandinavian characters åÅäæÄÆ-&amp;gt;a and öÖøØ-&amp;gt;o. It also discriminates against u
 0107        public static TokenFilterName ScandinavianFoldingNormalization { get; } = new TokenFilterName(ScandinavianFoldin
 108        /// <summary> Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyze
 0109        public static TokenFilterName Shingle { get; } = new TokenFilterName(ShingleValue);
 110        /// <summary> A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_
 0111        public static TokenFilterName Snowball { get; } = new TokenFilterName(SnowballValue);
 112        /// <summary> Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/ana
 0113        public static TokenFilterName SoraniNormalization { get; } = new TokenFilterName(SoraniNormalizationValue);
 114        /// <summary> Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-an
 0115        public static TokenFilterName Stemmer { get; } = new TokenFilterName(StemmerValue);
 116        /// <summary> Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/
 0117        public static TokenFilterName Stopwords { get; } = new TokenFilterName(StopwordsValue);
 118        /// <summary> Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyz
 0119        public static TokenFilterName Trim { get; } = new TokenFilterName(TrimValue);
 120        /// <summary> Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-commo
 0121        public static TokenFilterName Truncate { get; } = new TokenFilterName(TruncateValue);
 122        /// <summary> Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/
 0123        public static TokenFilterName Unique { get; } = new TokenFilterName(UniqueValue);
 124        /// <summary> Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org
 0125        public static TokenFilterName Uppercase { get; } = new TokenFilterName(UppercaseValue);
 126        /// <summary> Splits words into subwords and performs optional transformations on subword groups. </summary>
 0127        public static TokenFilterName WordDelimiter { get; } = new TokenFilterName(WordDelimiterValue);
 128        /// <summary> Determines if two <see cref="TokenFilterName"/> values are the same. </summary>
 0129        public static bool operator ==(TokenFilterName left, TokenFilterName right) => left.Equals(right);
 130        /// <summary> Determines if two <see cref="TokenFilterName"/> values are not the same. </summary>
 0131        public static bool operator !=(TokenFilterName left, TokenFilterName right) => !left.Equals(right);
 132        /// <summary> Converts a string to a <see cref="TokenFilterName"/>. </summary>
 0133        public static implicit operator TokenFilterName(string value) => new TokenFilterName(value);
 134
 135        /// <inheritdoc />
 136        [EditorBrowsable(EditorBrowsableState.Never)]
 0137        public override bool Equals(object obj) => obj is TokenFilterName other && Equals(other);
 138        /// <inheritdoc />
 0139        public bool Equals(TokenFilterName other) => string.Equals(_value, other._value, StringComparison.InvariantCultu
 140
 141        /// <inheritdoc />
 142        [EditorBrowsable(EditorBrowsableState.Never)]
 0143        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
 144        /// <inheritdoc />
 0145        public override string ToString() => _value;
 146    }
 147}