// <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>

namespace Microsoft.Azure.Search.Models
{
    using Newtonsoft.Json;
    using System.Collections;
    using System.Collections.Generic;
    using System.Linq;

    /// <summary>
    /// Allows you to take control over the process of converting text into
    /// indexable/searchable tokens. It's a user-defined configuration
    /// consisting of a single predefined tokenizer and one or more filters.
    /// The tokenizer is responsible for breaking text into tokens, and the
    /// filters for modifying tokens emitted by the tokenizer.
    /// </summary>
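    /// <example>
    /// A minimal usage sketch (illustrative only, not part of the generated
    /// API surface): the analyzer name "my_analyzer" is an arbitrary
    /// placeholder, and the tokenizer and token filter are assumed to be
    /// among the predefined names exposed on TokenizerName and
    /// TokenFilterName ('whitespace' and 'lowercase' are both listed in the
    /// documentation below).
    /// <code>
    /// var analyzer = new CustomAnalyzer(
    ///     "my_analyzer",
    ///     TokenizerName.Whitespace,
    ///     new[] { TokenFilterName.Lowercase });
    /// analyzer.Validate();
    /// </code>
    /// </example>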
    [Newtonsoft.Json.JsonObject("#Microsoft.Azure.Search.CustomAnalyzer")]
    public partial class CustomAnalyzer : Analyzer
    {
        /// <summary>
        /// Initializes a new instance of the CustomAnalyzer class.
        /// </summary>
        public CustomAnalyzer()
        {
            CustomInit();
        }

        /// <summary>
        /// Initializes a new instance of the CustomAnalyzer class.
        /// </summary>
        /// <param name="name">The name of the analyzer. It must only contain
        /// letters, digits, spaces, dashes or underscores, can only start and
        /// end with alphanumeric characters, and is limited to 128
        /// characters.</param>
        /// <param name="tokenizer">The name of the tokenizer to use to divide
        /// continuous text into a sequence of tokens, such as breaking a
        /// sentence into words. Possible values include: 'classic',
        /// 'edgeNGram', 'keyword_v2', 'letter', 'lowercase',
        /// 'microsoft_language_tokenizer',
        /// 'microsoft_language_stemming_tokenizer', 'nGram',
        /// 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email',
        /// 'whitespace'</param>
        /// <param name="tokenFilters">A list of token filters used to filter
        /// out or modify the tokens generated by a tokenizer. For example, you
        /// can specify a lowercase filter that converts all characters to
        /// lowercase. The filters are run in the order in which they are
        /// listed.</param>
        /// <param name="charFilters">A list of character filters used to
        /// prepare input text before it is processed by the tokenizer. For
        /// instance, they can replace certain characters or symbols. The
        /// filters are run in the order in which they are listed.</param>
        public CustomAnalyzer(string name, TokenizerName tokenizer, IList<TokenFilterName> tokenFilters = default(IList<TokenFilterName>), IList<CharFilterName> charFilters = default(IList<CharFilterName>))
            : base(name)
        {
            Tokenizer = tokenizer;
            TokenFilters = tokenFilters;
            CharFilters = charFilters;
            CustomInit();
        }

        /// <summary>
        /// An initialization method that performs custom operations like setting defaults
        /// </summary>
        partial void CustomInit();

        /// <summary>
        /// Gets or sets the name of the tokenizer to use to divide continuous
        /// text into a sequence of tokens, such as breaking a sentence into
        /// words. Possible values include: 'classic', 'edgeNGram',
        /// 'keyword_v2', 'letter', 'lowercase',
        /// 'microsoft_language_tokenizer',
        /// 'microsoft_language_stemming_tokenizer', 'nGram',
        /// 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email',
        /// 'whitespace'
        /// </summary>
        [JsonProperty(PropertyName = "tokenizer")]
        public TokenizerName Tokenizer { get; set; }

        /// <summary>
        /// Gets or sets a list of token filters used to filter out or modify
        /// the tokens generated by a tokenizer. For example, you can specify a
        /// lowercase filter that converts all characters to lowercase. The
        /// filters are run in the order in which they are listed.
        /// </summary>
        [JsonProperty(PropertyName = "tokenFilters")]
        public IList<TokenFilterName> TokenFilters { get; set; }

        /// <summary>
        /// Gets or sets a list of character filters used to prepare input text
        /// before it is processed by the tokenizer. For instance, they can
        /// replace certain characters or symbols. The filters are run in the
        /// order in which they are listed.
        /// </summary>
        [JsonProperty(PropertyName = "charFilters")]
        public IList<CharFilterName> CharFilters { get; set; }

        /// <summary>
        /// Validate the object.
        /// </summary>
        /// <exception cref="Rest.ValidationException">
        /// Thrown if validation fails
        /// </exception>
        public override void Validate()
        {
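            // Defers to the base Analyzer validation; in this generated SDK
            // the base class is assumed to check required properties such as
            // the analyzer name.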
            base.Validate();
        }
    }
}