Summary

Class: Azure.Search.Documents.Indexes.Models.LexicalTokenizerName
Assembly: Azure.Search.Documents
File(s): C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\LexicalTokenizerName.cs
Covered lines: 4
Uncovered lines: 18
Coverable lines: 22
Total lines: 84
Line coverage: 18.1% (4 of 22)
Covered branches: 1
Total branches: 6
Branch coverage: 16.6% (1 of 6)

Metrics

Method | Cyclomatic complexity | Line coverage | Branch coverage
.ctor(...) | - | 100% | 50%
get_Classic() | - | 0% | 100%
get_EdgeNGram() | - | 0% | 100%
get_Keyword() | - | 0% | 100%
get_Letter() | - | 0% | 100%
get_Lowercase() | - | 0% | 100%
get_MicrosoftLanguageTokenizer() | - | 0% | 100%
get_MicrosoftLanguageStemmingTokenizer() | - | 0% | 100%
get_NGram() | - | 0% | 100%
get_PathHierarchy() | - | 0% | 100%
get_Pattern() | - | 0% | 100%
get_Standard() | - | 0% | 100%
get_UaxUrlEmail() | - | 0% | 100%
get_Whitespace() | - | 100% | 100%
op_Equality(...) | - | 0% | 100%
op_Inequality(...) | - | 0% | 100%
op_Implicit(...) | - | 0% | 100%
Equals(...) | - | 0% | 0%
Equals(...) | - | 0% | 100%
GetHashCode() | - | 0% | 0%
ToString() | - | 100% | 100%

File(s)

C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\LexicalTokenizerName.cs

Hits | Line | Line coverage
 1// Copyright (c) Microsoft Corporation. All rights reserved.
 2// Licensed under the MIT License.
 3
 4// <auto-generated/>
 5
 6#nullable disable
 7
 8using System;
 9using System.ComponentModel;
 10
 11namespace Azure.Search.Documents.Indexes.Models
 12{
 13    /// <summary> Defines the names of all tokenizers supported by Azure Cognitive Search. </summary>
 14    public readonly partial struct LexicalTokenizerName : IEquatable<LexicalTokenizerName>
 15    {
 16        private readonly string _value;
 17
 18        /// <summary> Determines if two <see cref="LexicalTokenizerName"/> values are the same. </summary>
 19        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
 20        public LexicalTokenizerName(string value)
 21        {
 1322            _value = value ?? throw new ArgumentNullException(nameof(value));
 1323        }
 24
 25        private const string ClassicValue = "classic";
 26        private const string EdgeNGramValue = "edgeNGram";
 27        private const string KeywordValue = "keyword_v2";
 28        private const string LetterValue = "letter";
 29        private const string LowercaseValue = "lowercase";
 30        private const string MicrosoftLanguageTokenizerValue = "microsoft_language_tokenizer";
 31        private const string MicrosoftLanguageStemmingTokenizerValue = "microsoft_language_stemming_tokenizer";
 32        private const string NGramValue = "nGram";
 33        private const string PathHierarchyValue = "path_hierarchy_v2";
 34        private const string PatternValue = "pattern";
 35        private const string StandardValue = "standard_v2";
 36        private const string UaxUrlEmailValue = "uax_url_email";
 37        private const string WhitespaceValue = "whitespace";
 38
 39        /// <summary> Grammar-based tokenizer that is suitable for processing most European-language documents. See http
 040        public static LexicalTokenizerName Classic { get; } = new LexicalTokenizerName(ClassicValue);
 41        /// <summary> Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/
 042        public static LexicalTokenizerName EdgeNGram { get; } = new LexicalTokenizerName(EdgeNGramValue);
 43        /// <summary> Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-commo
 044        public static LexicalTokenizerName Keyword { get; } = new LexicalTokenizerName(KeywordValue);
 45        /// <summary> Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/
 046        public static LexicalTokenizerName Letter { get; } = new LexicalTokenizerName(LetterValue);
 47        /// <summary> Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_1
 048        public static LexicalTokenizerName Lowercase { get; } = new LexicalTokenizerName(LowercaseValue);
 49        /// <summary> Divides text using language-specific rules. </summary>
 050        public static LexicalTokenizerName MicrosoftLanguageTokenizer { get; } = new LexicalTokenizerName(MicrosoftLangu
 51        /// <summary> Divides text using language-specific rules and reduces words to their base forms. </summary>
 052        public static LexicalTokenizerName MicrosoftLanguageStemmingTokenizer { get; } = new LexicalTokenizerName(Micros
 53        /// <summary> Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/an
 054        public static LexicalTokenizerName NGram { get; } = new LexicalTokenizerName(NGramValue);
 55        /// <summary> Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org
 056        public static LexicalTokenizerName PathHierarchy { get; } = new LexicalTokenizerName(PathHierarchyValue);
 57        /// <summary> Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.
 058        public static LexicalTokenizerName Pattern { get; } = new LexicalTokenizerName(PatternValue);
 59        /// <summary> Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. Se
 060        public static LexicalTokenizerName Standard { get; } = new LexicalTokenizerName(StandardValue);
 61        /// <summary> Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/
 062        public static LexicalTokenizerName UaxUrlEmail { get; } = new LexicalTokenizerName(UaxUrlEmailValue);
 63        /// <summary> Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/l
 464        public static LexicalTokenizerName Whitespace { get; } = new LexicalTokenizerName(WhitespaceValue);
 65        /// <summary> Determines if two <see cref="LexicalTokenizerName"/> values are the same. </summary>
 066        public static bool operator ==(LexicalTokenizerName left, LexicalTokenizerName right) => left.Equals(right);
 67        /// <summary> Determines if two <see cref="LexicalTokenizerName"/> values are not the same. </summary>
 068        public static bool operator !=(LexicalTokenizerName left, LexicalTokenizerName right) => !left.Equals(right);
 69        /// <summary> Converts a string to a <see cref="LexicalTokenizerName"/>. </summary>
 070        public static implicit operator LexicalTokenizerName(string value) => new LexicalTokenizerName(value);
 71
 72        /// <inheritdoc />
 73        [EditorBrowsable(EditorBrowsableState.Never)]
 074        public override bool Equals(object obj) => obj is LexicalTokenizerName other && Equals(other);
 75        /// <inheritdoc />
 076        public bool Equals(LexicalTokenizerName other) => string.Equals(_value, other._value, StringComparison.Invariant
 77
 78        /// <inheritdoc />
 79        [EditorBrowsable(EditorBrowsableState.Never)]
 080        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
 81        /// <inheritdoc />
 282        public override string ToString() => _value;
 83    }
 84}