From f02dc073c5044fc11b1ee85ce5cb66e96128801d Mon Sep 17 00:00:00 2001 From: Ferenc Hernadi Date: Fri, 27 Sep 2019 00:39:55 +0200 Subject: [PATCH] Add ES params --- README.md | 6 +- ...logging.banzaicloud.io_clusteroutputs.yaml | 78 +++++++++++++++++++ .../logging.banzaicloud.io_outputs.yaml | 78 +++++++++++++++++++ ...logging.banzaicloud.io_clusteroutputs.yaml | 78 +++++++++++++++++++ .../bases/logging.banzaicloud.io_outputs.yaml | 78 +++++++++++++++++++ docs/deploy/manifests/deployment.yaml | 4 + docs/plugins/outputs/elasticsearch.md | 12 +++ pkg/model/output/elasticsearch.go | 44 +++++++++-- 8 files changed, 367 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index c22d95370..1d95b8dbb 100644 --- a/README.md +++ b/README.md @@ -106,9 +106,9 @@ For complete list of supported plugins pleas checkl the [plugins index](/docs/pl | Name | Type | Description | Status | Version | |---------------------------------------------------------|:------:|:-------------------------------------------------------------------------:|---------|-------------------------------------------------------------------------------------------| | [Alibaba](./docs/plugins/outputs/oss.md) | Output | Store logs the Alibaba Cloud Object Storage Service | GA | [0.0.1](https://github.com/aliyun/fluent-plugin-oss) | -| [Amazon S3](./docs/plugins/outputs/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.10](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.10) | -| [Azure](./docs/plugins/outputs/azurestore.md) | Output | Store logs in Azure Storega | GA | [0.1.1](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) | -| [Google Storage](./docs/plugins/outputs/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0.beta1](https://github.com/banzaicloud/fluent-plugin-gcs) | +| [Amazon S3](./docs/plugins/outputs/s3.md) | Output | Store logs in Amazon S3 | GA | [1.1.11](https://github.com/fluent/fluent-plugin-s3/releases/tag/v1.1.11) | +| [Azure](./docs/plugins/outputs/azurestore.md) | Output | Store logs in Azure Storega | GA | [0.1.0](https://github.com/htgc/fluent-plugin-azurestorage/releases/tag/v0.1.0) | +| [Google Storage](./docs/plugins/outputs/gcs.md) | Output | Store logs in Google Cloud Storage | GA | [0.4.0](https://github.com/banzaicloud/fluent-plugin-gcs) | | [Grafana Loki](./docs/plugins/outputs/loki.md) | Output | Transfer logs to Loki | Testing | [0.2](https://github.com/banzaicloud/fluent-plugin-kubernetes-loki/releases/tag/v0.2) | | [ElasticSearch](./docs/plugins/outputs/elasticsearch.md) | Output | Send your logs to Elasticsearch | GA | [3.5.5](https://github.com/uken/fluent-plugin-elasticsearch/releases/tag/v3.5.5) | | [Sumologic](./docs/plugins/outputs/sumologic.md) | Output | Send your logs to Sumologic | GA | [1.5.0](https://github.com/SumoLogic/fluentd-output-sumologic/releases/tag/1.5.0) | diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml index 19dc8a0de..27d37c70e 100644 --- a/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml +++ b/charts/logging-operator/templates/logging.banzaicloud.io_clusteroutputs.yaml @@ -410,11 +410,21 @@ spec: required: - timekey type: object + bulk_message_request_threshold: + description: 'Configure bulk_message request splitting threshold + size. Default value is 20MB. 
(20 * 1024 * 1024) If you specify + this size as negative number, bulk_message request splitting feature + will be disabled. (default: 20MB)' + type: string content_type: description: 'With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. (default: application/json)' type: string + custom_headers: + description: 'This parameter adds additional headers to request. + Example: {"token":"secret"} (default: {})' + type: string customize_template: description: Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be @@ -422,16 +432,36 @@ spec: template and to add rollover index please check the rollover_index configuration. type: string + default_elasticsearch_version: + description: This parameter changes that ES plugin assumes default + Elasticsearch version. The default value is 5. + type: string deflector_alias: description: Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API type: string + exception_backup: + description: 'Indicates whether to backup chunk when ignore exception + occurs. (default: true)' + type: boolean fail_on_putting_template_retry_exceed: description: 'Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugin, you could use this property to do not fail on fluentd statup.(default: true)' type: boolean + flatten_hashes: + description: 'Elasticsearch will complain if you send object and + concrete values to the same field. For example, you might have + logs that look this, from different places: {"people" => 100} + {"people" => {"some" => "thing"}} The second log line will be + rejected by the Elasticsearch parser because objects and concrete + values can''t live in the same field. To combat this, you can + enable hash flattening.' + type: boolean + flatten_hashes_separator: + description: Flatten separator + type: string host: description: You can specify Elasticsearch host by this parameter. (default:localhost) @@ -448,6 +478,16 @@ spec: id_key: description: https://github.com/uken/fluent-plugin-elasticsearch#id_key type: string + ignore_exceptions: + description: A list of exception that will be ignored - when the + exception occurs the chunk will be discarded and the buffer retry + mechanism won't be called. It is possible also to specify classes + at higher level in the hierarchy. For example `ignore_exceptions + ["Elasticsearch::Transport::Transport::ServerError"]` will match + all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, + Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, + etc. + type: string include_index_in_url: description: With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). @@ -471,6 +511,14 @@ spec: description: Specify the index prefix for the rollover index to be created. type: string + log_es_400_reason: + description: 'By default, the error logger won''t record the reason + for a 400 error from the Elasticsearch API unless you set log_level + to debug. However, this results in a lot of log spam, which isn''t + desirable if all you want is the 400 error reasons. You can set + this true to capture the 400 error reasons without all the other + debug logs. 
(default: false)' + type: boolean logstash_dateformat: description: 'Set the Logstash date format.(default: %Y.%m.%d)' type: string @@ -589,6 +637,12 @@ spec: description: If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] type: string + suppress_doc_wrap: + description: 'By default, record body is wrapped by ''doc''. This + behavior can not handle update script requests. You can set this + to suppress doc wrapping and allow record body to be untouched. + (default: false)' + type: boolean tag_key: description: 'This will add the Fluentd tag in the JSON record.(default: tag)' @@ -644,6 +698,16 @@ spec: degree of sub-second time precision to preserve from the time portion of the routed event. type: string + unrecoverable_error_types: + description: Default unrecoverable_error_types parameter is set + up strictly. Because es_rejected_execution_exception is caused + by exceeding Elasticsearch's thread pool capacity. Advanced users + can increase its capacity, but normal users should follow default + behavior. If you want to increase it and forcibly retrying bulk + request, please consider to change unrecoverable_error_types parameter + from default value. Change default value of thread_pool.bulk.queue_size + in elasticsearch.yml) + type: string user: description: User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. @@ -654,6 +718,20 @@ spec: with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false.(default: true)' type: boolean + validate_client_version: + description: 'When you use mismatched Elasticsearch server and client + libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. + (default: false)' + type: boolean + verify_es_version_at_startup: + description: 'Because Elasticsearch plugin should change behavior + each of Elasticsearch major versions. For example, Elasticsearch + 6 starts to prohibit multiple type_names in one index, and Elasticsearch + 7 will handle only _doc type_name in index. If you want to disable + to verify Elasticsearch version at start up, set it as false. + When using the following configuration, ES plugin intends to communicate + into Elasticsearch 6. (default: true)' + type: boolean with_transporter_log: description: 'This is debugging purpose option to enable to obtain transporter layer log. (default: false)' diff --git a/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml b/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml index 6f4eb0723..ceaf804b4 100644 --- a/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml +++ b/charts/logging-operator/templates/logging.banzaicloud.io_outputs.yaml @@ -410,11 +410,21 @@ spec: required: - timekey type: object + bulk_message_request_threshold: + description: 'Configure bulk_message request splitting threshold + size. Default value is 20MB. (20 * 1024 * 1024) If you specify + this size as negative number, bulk_message request splitting feature + will be disabled. (default: 20MB)' + type: string content_type: description: 'With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. (default: application/json)' type: string + custom_headers: + description: 'This parameter adds additional headers to request. 
+ Example: {"token":"secret"} (default: {})' + type: string customize_template: description: Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be @@ -422,16 +432,36 @@ spec: template and to add rollover index please check the rollover_index configuration. type: string + default_elasticsearch_version: + description: This parameter changes that ES plugin assumes default + Elasticsearch version. The default value is 5. + type: string deflector_alias: description: Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API type: string + exception_backup: + description: 'Indicates whether to backup chunk when ignore exception + occurs. (default: true)' + type: boolean fail_on_putting_template_retry_exceed: description: 'Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugin, you could use this property to do not fail on fluentd statup.(default: true)' type: boolean + flatten_hashes: + description: 'Elasticsearch will complain if you send object and + concrete values to the same field. For example, you might have + logs that look this, from different places: {"people" => 100} + {"people" => {"some" => "thing"}} The second log line will be + rejected by the Elasticsearch parser because objects and concrete + values can''t live in the same field. To combat this, you can + enable hash flattening.' + type: boolean + flatten_hashes_separator: + description: Flatten separator + type: string host: description: You can specify Elasticsearch host by this parameter. (default:localhost) @@ -448,6 +478,16 @@ spec: id_key: description: https://github.com/uken/fluent-plugin-elasticsearch#id_key type: string + ignore_exceptions: + description: A list of exception that will be ignored - when the + exception occurs the chunk will be discarded and the buffer retry + mechanism won't be called. It is possible also to specify classes + at higher level in the hierarchy. For example `ignore_exceptions + ["Elasticsearch::Transport::Transport::ServerError"]` will match + all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, + Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, + etc. + type: string include_index_in_url: description: With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). @@ -471,6 +511,14 @@ spec: description: Specify the index prefix for the rollover index to be created. type: string + log_es_400_reason: + description: 'By default, the error logger won''t record the reason + for a 400 error from the Elasticsearch API unless you set log_level + to debug. However, this results in a lot of log spam, which isn''t + desirable if all you want is the 400 error reasons. You can set + this true to capture the 400 error reasons without all the other + debug logs. (default: false)' + type: boolean logstash_dateformat: description: 'Set the Logstash date format.(default: %Y.%m.%d)' type: string @@ -589,6 +637,12 @@ spec: description: If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] type: string + suppress_doc_wrap: + description: 'By default, record body is wrapped by ''doc''. This + behavior can not handle update script requests. You can set this + to suppress doc wrapping and allow record body to be untouched. 
+ (default: false)' + type: boolean tag_key: description: 'This will add the Fluentd tag in the JSON record.(default: tag)' @@ -644,6 +698,16 @@ spec: degree of sub-second time precision to preserve from the time portion of the routed event. type: string + unrecoverable_error_types: + description: Default unrecoverable_error_types parameter is set + up strictly. Because es_rejected_execution_exception is caused + by exceeding Elasticsearch's thread pool capacity. Advanced users + can increase its capacity, but normal users should follow default + behavior. If you want to increase it and forcibly retrying bulk + request, please consider to change unrecoverable_error_types parameter + from default value. Change default value of thread_pool.bulk.queue_size + in elasticsearch.yml) + type: string user: description: User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. @@ -654,6 +718,20 @@ spec: with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false.(default: true)' type: boolean + validate_client_version: + description: 'When you use mismatched Elasticsearch server and client + libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. + (default: false)' + type: boolean + verify_es_version_at_startup: + description: 'Because Elasticsearch plugin should change behavior + each of Elasticsearch major versions. For example, Elasticsearch + 6 starts to prohibit multiple type_names in one index, and Elasticsearch + 7 will handle only _doc type_name in index. If you want to disable + to verify Elasticsearch version at start up, set it as false. + When using the following configuration, ES plugin intends to communicate + into Elasticsearch 6. (default: true)' + type: boolean with_transporter_log: description: 'This is debugging purpose option to enable to obtain transporter layer log. (default: false)' diff --git a/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml b/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml index 19dc8a0de..27d37c70e 100644 --- a/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml +++ b/config/crd/bases/logging.banzaicloud.io_clusteroutputs.yaml @@ -410,11 +410,21 @@ spec: required: - timekey type: object + bulk_message_request_threshold: + description: 'Configure bulk_message request splitting threshold + size. Default value is 20MB. (20 * 1024 * 1024) If you specify + this size as negative number, bulk_message request splitting feature + will be disabled. (default: 20MB)' + type: string content_type: description: 'With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. (default: application/json)' type: string + custom_headers: + description: 'This parameter adds additional headers to request. + Example: {"token":"secret"} (default: {})' + type: string customize_template: description: Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be @@ -422,16 +432,36 @@ spec: template and to add rollover index please check the rollover_index configuration. type: string + default_elasticsearch_version: + description: This parameter changes that ES plugin assumes default + Elasticsearch version. The default value is 5. + type: string deflector_alias: description: Specify the deflector alias which would be assigned to the rollover index created. 
This is useful in case of using the Elasticsearch rollover API type: string + exception_backup: + description: 'Indicates whether to backup chunk when ignore exception + occurs. (default: true)' + type: boolean fail_on_putting_template_retry_exceed: description: 'Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugin, you could use this property to do not fail on fluentd statup.(default: true)' type: boolean + flatten_hashes: + description: 'Elasticsearch will complain if you send object and + concrete values to the same field. For example, you might have + logs that look this, from different places: {"people" => 100} + {"people" => {"some" => "thing"}} The second log line will be + rejected by the Elasticsearch parser because objects and concrete + values can''t live in the same field. To combat this, you can + enable hash flattening.' + type: boolean + flatten_hashes_separator: + description: Flatten separator + type: string host: description: You can specify Elasticsearch host by this parameter. (default:localhost) @@ -448,6 +478,16 @@ spec: id_key: description: https://github.com/uken/fluent-plugin-elasticsearch#id_key type: string + ignore_exceptions: + description: A list of exception that will be ignored - when the + exception occurs the chunk will be discarded and the buffer retry + mechanism won't be called. It is possible also to specify classes + at higher level in the hierarchy. For example `ignore_exceptions + ["Elasticsearch::Transport::Transport::ServerError"]` will match + all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, + Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, + etc. + type: string include_index_in_url: description: With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). @@ -471,6 +511,14 @@ spec: description: Specify the index prefix for the rollover index to be created. type: string + log_es_400_reason: + description: 'By default, the error logger won''t record the reason + for a 400 error from the Elasticsearch API unless you set log_level + to debug. However, this results in a lot of log spam, which isn''t + desirable if all you want is the 400 error reasons. You can set + this true to capture the 400 error reasons without all the other + debug logs. (default: false)' + type: boolean logstash_dateformat: description: 'Set the Logstash date format.(default: %Y.%m.%d)' type: string @@ -589,6 +637,12 @@ spec: description: If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] type: string + suppress_doc_wrap: + description: 'By default, record body is wrapped by ''doc''. This + behavior can not handle update script requests. You can set this + to suppress doc wrapping and allow record body to be untouched. + (default: false)' + type: boolean tag_key: description: 'This will add the Fluentd tag in the JSON record.(default: tag)' @@ -644,6 +698,16 @@ spec: degree of sub-second time precision to preserve from the time portion of the routed event. type: string + unrecoverable_error_types: + description: Default unrecoverable_error_types parameter is set + up strictly. Because es_rejected_execution_exception is caused + by exceeding Elasticsearch's thread pool capacity. Advanced users + can increase its capacity, but normal users should follow default + behavior. 
If you want to increase it and forcibly retrying bulk + request, please consider to change unrecoverable_error_types parameter + from default value. Change default value of thread_pool.bulk.queue_size + in elasticsearch.yml) + type: string user: description: User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. @@ -654,6 +718,20 @@ spec: with UTC (Coordinated Universal Time). This option allows to use local time if you describe utc_index to false.(default: true)' type: boolean + validate_client_version: + description: 'When you use mismatched Elasticsearch server and client + libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. + (default: false)' + type: boolean + verify_es_version_at_startup: + description: 'Because Elasticsearch plugin should change behavior + each of Elasticsearch major versions. For example, Elasticsearch + 6 starts to prohibit multiple type_names in one index, and Elasticsearch + 7 will handle only _doc type_name in index. If you want to disable + to verify Elasticsearch version at start up, set it as false. + When using the following configuration, ES plugin intends to communicate + into Elasticsearch 6. (default: true)' + type: boolean with_transporter_log: description: 'This is debugging purpose option to enable to obtain transporter layer log. (default: false)' diff --git a/config/crd/bases/logging.banzaicloud.io_outputs.yaml b/config/crd/bases/logging.banzaicloud.io_outputs.yaml index 6f4eb0723..ceaf804b4 100644 --- a/config/crd/bases/logging.banzaicloud.io_outputs.yaml +++ b/config/crd/bases/logging.banzaicloud.io_outputs.yaml @@ -410,11 +410,21 @@ spec: required: - timekey type: object + bulk_message_request_threshold: + description: 'Configure bulk_message request splitting threshold + size. Default value is 20MB. (20 * 1024 * 1024) If you specify + this size as negative number, bulk_message request splitting feature + will be disabled. (default: 20MB)' + type: string content_type: description: 'With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Type in payload. (default: application/json)' type: string + custom_headers: + description: 'This parameter adds additional headers to request. + Example: {"token":"secret"} (default: {})' + type: string customize_template: description: Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be @@ -422,16 +432,36 @@ spec: template and to add rollover index please check the rollover_index configuration. type: string + default_elasticsearch_version: + description: This parameter changes that ES plugin assumes default + Elasticsearch version. The default value is 5. + type: string deflector_alias: description: Specify the deflector alias which would be assigned to the rollover index created. This is useful in case of using the Elasticsearch rollover API type: string + exception_backup: + description: 'Indicates whether to backup chunk when ignore exception + occurs. (default: true)' + type: boolean fail_on_putting_template_retry_exceed: description: 'Indicates whether to fail when max_retry_putting_template is exceeded. If you have multiple output plugin, you could use this property to do not fail on fluentd statup.(default: true)' type: boolean + flatten_hashes: + description: 'Elasticsearch will complain if you send object and + concrete values to the same field. 
For example, you might have + logs that look this, from different places: {"people" => 100} + {"people" => {"some" => "thing"}} The second log line will be + rejected by the Elasticsearch parser because objects and concrete + values can''t live in the same field. To combat this, you can + enable hash flattening.' + type: boolean + flatten_hashes_separator: + description: Flatten separator + type: string host: description: You can specify Elasticsearch host by this parameter. (default:localhost) @@ -448,6 +478,16 @@ spec: id_key: description: https://github.com/uken/fluent-plugin-elasticsearch#id_key type: string + ignore_exceptions: + description: A list of exception that will be ignored - when the + exception occurs the chunk will be discarded and the buffer retry + mechanism won't be called. It is possible also to specify classes + at higher level in the hierarchy. For example `ignore_exceptions + ["Elasticsearch::Transport::Transport::ServerError"]` will match + all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, + Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, + etc. + type: string include_index_in_url: description: With this option set to true, Fluentd manifests the index name in the request URL (rather than in the request body). @@ -471,6 +511,14 @@ spec: description: Specify the index prefix for the rollover index to be created. type: string + log_es_400_reason: + description: 'By default, the error logger won''t record the reason + for a 400 error from the Elasticsearch API unless you set log_level + to debug. However, this results in a lot of log spam, which isn''t + desirable if all you want is the 400 error reasons. You can set + this true to capture the 400 error reasons without all the other + debug logs. (default: false)' + type: boolean logstash_dateformat: description: 'Set the Logstash date format.(default: %Y.%m.%d)' type: string @@ -589,6 +637,12 @@ spec: description: If you want to configure SSL/TLS version, you can specify ssl_version parameter. [SSLv23, TLSv1, TLSv1_1, TLSv1_2] type: string + suppress_doc_wrap: + description: 'By default, record body is wrapped by ''doc''. This + behavior can not handle update script requests. You can set this + to suppress doc wrapping and allow record body to be untouched. + (default: false)' + type: boolean tag_key: description: 'This will add the Fluentd tag in the JSON record.(default: tag)' @@ -644,6 +698,16 @@ spec: degree of sub-second time precision to preserve from the time portion of the routed event. type: string + unrecoverable_error_types: + description: Default unrecoverable_error_types parameter is set + up strictly. Because es_rejected_execution_exception is caused + by exceeding Elasticsearch's thread pool capacity. Advanced users + can increase its capacity, but normal users should follow default + behavior. If you want to increase it and forcibly retrying bulk + request, please consider to change unrecoverable_error_types parameter + from default value. Change default value of thread_pool.bulk.queue_size + in elasticsearch.yml) + type: string user: description: User for HTTP Basic authentication. This plugin will escape required URL encoded characters within %{} placeholders. @@ -654,6 +718,20 @@ spec: with UTC (Coordinated Universal Time). 
This option allows to use
                      local time if you describe utc_index to false.(default: true)'
                    type: boolean
+                  validate_client_version:
+                    description: 'When you use mismatched Elasticsearch server and client
+                      libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.
+                      (default: false)'
+                    type: boolean
+                  verify_es_version_at_startup:
+                    description: 'Because Elasticsearch plugin should change behavior
+                      each of Elasticsearch major versions. For example, Elasticsearch
+                      6 starts to prohibit multiple type_names in one index, and Elasticsearch
+                      7 will handle only _doc type_name in index. If you want to disable
+                      to verify Elasticsearch version at start up, set it as false.
+                      When using the following configuration, ES plugin intends to communicate
+                      into Elasticsearch 6. (default: true)'
+                    type: boolean
                   with_transporter_log:
                     description: 'This is debugging purpose option to enable to obtain
                       transporter layer log. (default: false)'
diff --git a/docs/plugins/outputs/elasticsearch.md b/docs/plugins/outputs/elasticsearch.md
index 999f9ba40..a461e3bcd 100644
--- a/docs/plugins/outputs/elasticsearch.md
+++ b/docs/plugins/outputs/elasticsearch.md
@@ -56,4 +56,16 @@
| time_parse_error_tag | string | No | - | With logstash_format true, elasticsearch plugin parses timestamp field for generating index name. If the record has invalid timestamp value, this plugin emits an error event to @ERROR label with time_parse_error_tag configured tag.
| | http_backend | string | No | excon | With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive.
| | prefer_oj_serializer | bool | No | fqlse | With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder.
|
| flatten_hashes | bool | No | - | Elasticsearch will complain if you send objects and concrete values to the same field. For example, you might have logs that look like this, from different places: {"people" => 100} {"people" => {"some" => "thing"}} The second log line will be rejected by the Elasticsearch parser because objects and concrete values can't live in the same field. To combat this, you can enable hash flattening.
| +| flatten_hashes_separator | string | No | - | Flatten separator
| +| validate_client_version | bool | No | false | When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch.
|
| unrecoverable_error_types | string | No | - | The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch's thread pool capacity. Advanced users can increase that capacity, but normal users should keep the default behavior. If you want to increase the capacity and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value. (Change the default value of thread_pool.bulk.queue_size in elasticsearch.yml.)
|
| verify_es_version_at_startup | bool | No | true | The Elasticsearch plugin changes its behavior depending on the Elasticsearch major version. For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index. If you want to disable verifying the Elasticsearch version at startup, set this to false. When verification is disabled, the plugin assumes the version given in default_elasticsearch_version.
|
| default_elasticsearch_version | string | No | - | This parameter changes the default Elasticsearch version that the plugin assumes. The default value is 5.
| +| custom_headers | string | No | {} | This parameter adds additional headers to request. Example: {"token":"secret"}
| +| log_es_400_reason | bool | No | false | By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs.
| +| suppress_doc_wrap | bool | No | false | By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched.
|
| ignore_exceptions | string | No | - | A list of exceptions that will be ignored - when such an exception occurs, the chunk will be discarded and the buffer retry mechanism won't be called. It is also possible to specify classes at a higher level in the hierarchy. For example 
`ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"]`
will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.
|
| exception_backup | bool | No | true | Indicates whether to back up the chunk when an ignored exception occurs.
| +| bulk_message_request_threshold | string | No | 20MB | Configure bulk_message request splitting threshold size.
Default value is 20MB. (20 * 1024 * 1024)
If you specify this size as a negative number, the bulk_message request splitting feature will be disabled.
| | buffer | *Buffer | No | - | [Buffer](./buffer.md)
| diff --git a/pkg/model/output/elasticsearch.go b/pkg/model/output/elasticsearch.go index 4c8aea6a8..a520d823f 100644 --- a/pkg/model/output/elasticsearch.go +++ b/pkg/model/output/elasticsearch.go @@ -54,10 +54,8 @@ type ElasticsearchOutput struct { TargetTypeKey string `json:"target_type_key,omitempty"` // The name of the template to define. If a template by the name given is already present, it will be left unchanged, unless template_overwrite is set, in which case the template will be updated. TemplateName string `json:"template_name,omitempty"` - // The path to the file containing the template to install. TemplateFile string `json:"template_file,omitempty"` - // Specify index templates in form of hash. Can contain multiple templates. Templates string `json:"templates,omitempty"` // Specify the string and its value to be replaced in form of hash. Can contain multiple key value pair that would be replaced in the specified template_file. This setting only creates template and to add rollover index please check the rollover_index configuration. @@ -81,7 +79,6 @@ type ElasticsearchOutput struct { // You can specify times of retry obtaining Elasticsearch version.(default: 15) MaxRetryGetEsVersion string `json:"max_retry_get_es_version,omitempty"` - // You can specify HTTP request timeout.(default: 5s) RequestTimeout string `json:"request_timeout,omitempty"` // You can tune how the elasticsearch-transport host reloading feature works.(default: true) @@ -90,12 +87,10 @@ type ElasticsearchOutput struct { ReloadOnFailure bool `json:"reload_on_failure,omitempty"` // You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected.(default: 60s) ResurrectAfter string `json:"resurrect_after,omitempty"` - // This will add the Fluentd tag in the JSON record.(default: false) IncludeTagKey bool `json:"include_tag_key,omitempty"` // This will add the Fluentd tag in the JSON record.(default: tag) TagKey string `json:"tag_key,omitempty"` - // https://github.com/uken/fluent-plugin-elasticsearch#id_key IdKey string `json:"id_key,omitempty"` // Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event. @@ -120,10 +115,43 @@ type ElasticsearchOutput struct { TimeParseErrorTag string `json:"time_parse_error_tag,omitempty"` // With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. (default: excon) HttpBackend string `json:"http_backend,omitempty"` - // With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder. (default: fqlse) - OreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` - + PreferOjSerializer bool `json:"prefer_oj_serializer,omitempty"` + // Elasticsearch will complain if you send object and concrete values to the same field. For example, you might have logs that look this, from different places: + //{"people" => 100} {"people" => {"some" => "thing"}} + //The second log line will be rejected by the Elasticsearch parser because objects and concrete values can't live in the same field. To combat this, you can enable hash flattening. 
FlattenHashes bool `json:"flatten_hashes,omitempty"`
+	// Flatten separator
+	FlattenHashesSeparator string `json:"flatten_hashes_separator,omitempty"`
+	// When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. (default: false)
+	ValidateClientVersion bool `json:"validate_client_version,omitempty"`
+	// The default unrecoverable_error_types parameter is set up strictly, because es_rejected_execution_exception is caused by exceeding Elasticsearch's thread pool capacity. Advanced users can increase that capacity, but normal users should keep the default behavior.
+	// If you want to increase the capacity and forcibly retry bulk requests, consider changing the unrecoverable_error_types parameter from its default value.
+	// (Change the default value of thread_pool.bulk.queue_size in elasticsearch.yml.)
+	UnrecoverableErrorTypes string `json:"unrecoverable_error_types,omitempty"`
+	// The Elasticsearch plugin changes its behavior depending on the Elasticsearch major version.
+	// For example, Elasticsearch 6 starts to prohibit multiple type_names in one index, and Elasticsearch 7 handles only the _doc type_name in an index.
+	// If you want to disable verifying the Elasticsearch version at startup, set this to false.
+	// When verification is disabled, the plugin assumes the version given in default_elasticsearch_version. (default: true)
+	VerifyEsVersionAtStartup bool `json:"verify_es_version_at_startup,omitempty"`
+	// This parameter changes the default Elasticsearch version that the plugin assumes. The default value is 5.
+	DefaultElasticsearchVersion string `json:"default_elasticsearch_version,omitempty"`
+	// This parameter adds additional headers to request. Example: {"token":"secret"} (default: {})
+	CustomHeaders string `json:"custom_headers,omitempty"`
+	// By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs. (default: false)
+	LogEs400Reason bool `json:"log_es_400_reason,omitempty"`
+	// By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. (default: false)
+	SuppressDocWrap bool `json:"suppress_doc_wrap,omitempty"`
+	// A list of exceptions that will be ignored - when such an exception occurs, the chunk will be discarded and the buffer retry mechanism won't be called. It is also possible to specify classes at a higher level in the hierarchy. For example
+	// `ignore_exceptions ["Elasticsearch::Transport::Transport::ServerError"]`
+	// will match all subclasses of ServerError - Elasticsearch::Transport::Transport::Errors::BadRequest, Elasticsearch::Transport::Transport::Errors::ServiceUnavailable, etc.
+	IgnoreExceptions string `json:"ignore_exceptions,omitempty"`
+	// Indicates whether to back up the chunk when an ignored exception occurs. (default: true)
+	ExceptionBackup bool `json:"exception_backup,omitempty"`
+	// Configure bulk_message request splitting threshold size.
+	// Default value is 20MB. (20 * 1024 * 1024)
+	// If you specify this size as a negative number, the bulk_message request splitting feature will be disabled. 
(default: 20MB) + BulkMessageRequestThreshold string `json:"bulk_message_request_threshold,omitempty"` // +docLink:"Buffer,./buffer.md" Buffer *Buffer `json:"buffer,omitempty"` }
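
For reference, a minimal sketch of an Output resource that exercises several of the new parameters follows. The apiVersion, Elasticsearch endpoint, header token, and buffer values are illustrative assumptions rather than values taken from this patch.

apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: es-output-sample
spec:
  elasticsearch:
    host: elasticsearch.logging.svc.cluster.local  # assumed in-cluster Elasticsearch service
    port: 9200
    scheme: https
    # Flatten nested objects so mixed object/scalar values are not rejected
    flatten_hashes: true
    flatten_hashes_separator: "_"
    # Record the reason for 400 responses without enabling full debug logging
    log_es_400_reason: true
    # Extra request headers, passed as a JSON-encoded string
    custom_headers: '{"token":"secret"}'
    # Split bulk requests above this size; a negative value disables splitting
    bulk_message_request_threshold: 20MB
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true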