Summary

Class: Azure.Search.Documents.Indexes.Models.EdgeNGramTokenizer
Assembly: Azure.Search.Documents
File(s): C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\EdgeNGramTokenizer.cs
         C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\EdgeNGramTokenizer.Serialization.cs
         C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Indexes\Models\EdgeNGramTokenizer.cs
Covered lines: 0
Uncovered lines: 58
Coverable lines: 58
Total lines: 153
Line coverage: 0% (0 of 58)
Covered branches: 0
Total branches: 26
Branch coverage: 0% (0 of 26)

Metrics

Method                                        Cyclomatic complexity  Line coverage  Branch coverage
.ctor(...)                                    -                      0%             0%
.ctor(...)                                    -                      0%             0%
get_MinGram()                                 -                      0%             100%
get_MaxGram()                                 -                      0%             100%
Azure.Core.IUtf8JsonSerializable.Write(...)   -                      0%             0%
DeserializeEdgeNGramTokenizer(...)            -                      0%             0%
get_TokenChars()                              -                      0%             100%

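Every method in the table above reports 0% line coverage, so even a small unit test over the public surface would move most of these numbers. The sketch below is illustrative rather than taken from the repository; it assumes an NUnit-based test project (as the Azure SDK test projects use) and a hypothetical test class name:

using System;
using Azure.Search.Documents.Indexes.Models;
using NUnit.Framework;

public class EdgeNGramTokenizerTests
{
    [Test]
    public void PublicConstructorAndPropertiesRoundTrip()
    {
        // Exercises the public constructor, the MinGram/MaxGram setters and getters,
        // and the get-only TokenChars collection.
        var tokenizer = new EdgeNGramTokenizer("edge-ngram-tokenizer")
        {
            MinGram = 2,
            MaxGram = 10,
        };
        tokenizer.TokenChars.Add(TokenCharacterKind.Letter);

        Assert.That(tokenizer.MinGram, Is.EqualTo(2));
        Assert.That(tokenizer.MaxGram, Is.EqualTo(10));
        Assert.That(tokenizer.TokenChars, Has.Count.EqualTo(1));
    }

    [Test]
    public void ConstructorRejectsNullName()
    {
        // Covers the ArgumentNullException path; note the base LexicalTokenizer
        // constructor may perform the same null check before the derived one runs.
        Assert.Throws<ArgumentNullException>(() => new EdgeNGramTokenizer(null));
    }
}
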
File(s)

C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\EdgeNGramTokenizer.cs

#    Line    Line coverage
 1// Copyright (c) Microsoft Corporation. All rights reserved.
 2// Licensed under the MIT License.
 3
 4// <auto-generated/>
 5
 6#nullable disable
 7
 8using System;
 9using System.Collections.Generic;
 10using Azure.Core;
 11
 12namespace Azure.Search.Documents.Indexes.Models
 13{
 14    /// <summary> Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. </summary>
 15    public partial class EdgeNGramTokenizer : LexicalTokenizer
 16    {
 17        /// <summary> Initializes a new instance of EdgeNGramTokenizer. </summary>
 18        /// <param name="name"> The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. </param>
 19        /// <exception cref="ArgumentNullException"> <paramref name="name"/> is null. </exception>
 020        public EdgeNGramTokenizer(string name) : base(name)
 21        {
 022            if (name == null)
 23            {
 024                throw new ArgumentNullException(nameof(name));
 25            }
 26
 027            TokenChars = new ChangeTrackingList<TokenCharacterKind>();
 028            ODataType = "#Microsoft.Azure.Search.EdgeNGramTokenizer";
 029        }
 30
 31        /// <summary> Initializes a new instance of EdgeNGramTokenizer. </summary>
 32        /// <param name="oDataType"> Identifies the concrete type of the tokenizer. </param>
 33        /// <param name="name"> The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. </param>
 34        /// <param name="minGram"> The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. </param>
 35        /// <param name="maxGram"> The maximum n-gram length. Default is 2. Maximum is 300. </param>
 36        /// <param name="tokenChars"> Character classes to keep in the tokens. </param>
 037        internal EdgeNGramTokenizer(string oDataType, string name, int? minGram, int? maxGram, IList<TokenCharacterKind> tokenChars) : base(oDataType, name)
 38        {
 039            MinGram = minGram;
 040            MaxGram = maxGram;
 041            TokenChars = tokenChars;
 042            ODataType = oDataType ?? "#Microsoft.Azure.Search.EdgeNGramTokenizer";
 043        }
 44
 45        /// <summary> The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. </summary>
 046        public int? MinGram { get; set; }
 47        /// <summary> The maximum n-gram length. Default is 2. Maximum is 300. </summary>
 048        public int? MaxGram { get; set; }
 49    }
 50}

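The internal constructor (line 37) and its "oDataType ?? ..." fallback (line 42) are not reachable through the public surface. A minimal sketch, assuming the test assembly has internals access to Azure.Search.Documents (both this constructor and the ODataType discriminator are internal), with parameter names taken from the listing above:

using System.Collections.Generic;
using Azure.Search.Documents.Indexes.Models;
using NUnit.Framework;

public class EdgeNGramTokenizerInternalConstructorTests
{
    [Test]
    public void NullODataTypeFallsBackToDiscriminator()
    {
        // Drives the internal constructor directly so the null branch of the
        // "oDataType ?? ..." expression is covered; a second test passing a
        // non-null oDataType would hit the other branch.
        var tokenizer = new EdgeNGramTokenizer(
            oDataType: null,
            name: "edge-ngram-tokenizer",
            minGram: 1,
            maxGram: 2,
            tokenChars: new List<TokenCharacterKind> { TokenCharacterKind.Digit });

        Assert.That(tokenizer.ODataType, Is.EqualTo("#Microsoft.Azure.Search.EdgeNGramTokenizer"));
        Assert.That(tokenizer.MinGram, Is.EqualTo(1));
        Assert.That(tokenizer.MaxGram, Is.EqualTo(2));
    }
}
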
C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Generated\Models\EdgeNGramTokenizer.Serialization.cs

#    Line    Line coverage
 1// Copyright (c) Microsoft Corporation. All rights reserved.
 2// Licensed under the MIT License.
 3
 4// <auto-generated/>
 5
 6#nullable disable
 7
 8using System.Collections.Generic;
 9using System.Text.Json;
 10using Azure.Core;
 11
 12namespace Azure.Search.Documents.Indexes.Models
 13{
 14    public partial class EdgeNGramTokenizer : IUtf8JsonSerializable
 15    {
 16        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer)
 17        {
 018            writer.WriteStartObject();
 019            if (Optional.IsDefined(MinGram))
 20            {
 021                writer.WritePropertyName("minGram");
 022                writer.WriteNumberValue(MinGram.Value);
 23            }
 024            if (Optional.IsDefined(MaxGram))
 25            {
 026                writer.WritePropertyName("maxGram");
 027                writer.WriteNumberValue(MaxGram.Value);
 28            }
 029            if (Optional.IsCollectionDefined(TokenChars))
 30            {
 031                writer.WritePropertyName("tokenChars");
 032                writer.WriteStartArray();
 033                foreach (var item in TokenChars)
 34                {
 035                    writer.WriteStringValue(item.ToSerialString());
 36                }
 037                writer.WriteEndArray();
 38            }
 039            writer.WritePropertyName("@odata.type");
 040            writer.WriteStringValue(ODataType);
 041            writer.WritePropertyName("name");
 042            writer.WriteStringValue(Name);
 043            writer.WriteEndObject();
 044        }
 45
 46        internal static EdgeNGramTokenizer DeserializeEdgeNGramTokenizer(JsonElement element)
 47        {
 048            Optional<int> minGram = default;
 049            Optional<int> maxGram = default;
 050            Optional<IList<TokenCharacterKind>> tokenChars = default;
 051            string odataType = default;
 052            string name = default;
 053            foreach (var property in element.EnumerateObject())
 54            {
 055                if (property.NameEquals("minGram"))
 56                {
 057                    minGram = property.Value.GetInt32();
 058                    continue;
 59                }
 060                if (property.NameEquals("maxGram"))
 61                {
 062                    maxGram = property.Value.GetInt32();
 063                    continue;
 64                }
 065                if (property.NameEquals("tokenChars"))
 66                {
 067                    List<TokenCharacterKind> array = new List<TokenCharacterKind>();
 068                    foreach (var item in property.Value.EnumerateArray())
 69                    {
 070                        array.Add(item.GetString().ToTokenCharacterKind());
 71                    }
 072                    tokenChars = array;
 073                    continue;
 74                }
 075                if (property.NameEquals("@odata.type"))
 76                {
 077                    odataType = property.Value.GetString();
 078                    continue;
 79                }
 080                if (property.NameEquals("name"))
 81                {
 082                    name = property.Value.GetString();
 83                    continue;
 84                }
 85            }
 086            return new EdgeNGramTokenizer(odataType, name, Optional.ToNullable(minGram), Optional.ToNullable(maxGram), Optional.ToList(tokenChars));
 87        }
 88    }
 89}

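Both the explicit IUtf8JsonSerializable.Write implementation and the internal DeserializeEdgeNGramTokenizer factory can be driven with System.Text.Json primitives. A round-trip sketch, again assuming internals access (the IUtf8JsonSerializable interface comes from Azure.Core shared sources and the deserializer is internal); it exercises the "value defined" branches, while a second test on a model with no optional values set would cover the remaining ones:

using System.IO;
using System.Text.Json;
using Azure.Core;
using Azure.Search.Documents.Indexes.Models;
using NUnit.Framework;

public class EdgeNGramTokenizerSerializationTests
{
    [Test]
    public void RoundTripsThroughJson()
    {
        var original = new EdgeNGramTokenizer("edge-ngram-tokenizer")
        {
            MinGram = 1,
            MaxGram = 2,
        };
        original.TokenChars.Add(TokenCharacterKind.Letter);

        // Serialize via the explicit IUtf8JsonSerializable implementation.
        using var stream = new MemoryStream();
        using (var writer = new Utf8JsonWriter(stream))
        {
            ((IUtf8JsonSerializable)original).Write(writer);
        }

        // Deserialize through the generated static factory and compare.
        using var document = JsonDocument.Parse(stream.ToArray());
        EdgeNGramTokenizer roundTripped =
            EdgeNGramTokenizer.DeserializeEdgeNGramTokenizer(document.RootElement);

        Assert.That(roundTripped.Name, Is.EqualTo(original.Name));
        Assert.That(roundTripped.MinGram, Is.EqualTo(original.MinGram));
        Assert.That(roundTripped.MaxGram, Is.EqualTo(original.MaxGram));
        Assert.That(roundTripped.TokenChars, Is.EqualTo(original.TokenChars));
    }
}
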
C:\Git\azure-sdk-for-net\sdk\search\Azure.Search.Documents\src\Indexes\Models\EdgeNGramTokenizer.cs

#    Line    Line coverage
 1// Copyright (c) Microsoft Corporation. All rights reserved.
 2// Licensed under the MIT License.
 3
 4using System.Collections.Generic;
 5using Azure.Core;
 6
 7namespace Azure.Search.Documents.Indexes.Models
 8{
 9    public partial class EdgeNGramTokenizer
 10    {
 11        /// <summary> Character classes to keep in the tokens. </summary>
 012        public IList<TokenCharacterKind> TokenChars { get; }
 13    }
 14}
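
Because TokenChars is get-only, callers populate the existing list rather than assigning a new one. A brief usage sketch (the index name is illustrative, and it assumes the SearchIndex.Tokenizers collection from the same library):

using Azure.Search.Documents.Indexes.Models;

// Configure an edge n-gram tokenizer that keeps letters and digits,
// then register it on an index definition.
var tokenizer = new EdgeNGramTokenizer("edge-ngram-tokenizer")
{
    MinGram = 2,
    MaxGram = 20,
};
tokenizer.TokenChars.Add(TokenCharacterKind.Letter);
tokenizer.TokenChars.Add(TokenCharacterKind.Digit);

var index = new SearchIndex("hotels-sample");
index.Tokenizers.Add(tokenizer);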