# Filebeat Configuration Example
############################# Filebeat #############################
filebeat:
  # List of prospectors to fetch data.
  prospectors:
    -
      # Paths of the log files to monitor. Glob patterns are allowed.
      paths:
        - /var/log/*.log
      # Encoding of the monitored files. Both plain and utf-8 can handle
      # Chinese logs. Some sample encodings:
      #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
      #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
      #encoding: plain

      # Input type of the prospector: log (default) or stdin.
      input_type: log

      # Drop input lines matching any regular expression in the list.
      # exclude_lines: ["^DBG"]

      # Keep only input lines matching any regular expression in the list.
      # include_lines is applied after exclude_lines.
      # include_lines: ["^ERR", "^WARN"]

      # Ignore files matching any regular expression in the list. By
      # default every file matched by paths gets its own harvester.
      # exclude_files: [".gz$"]

      # Extra fields (e.g. "level: debug") added to every output event,
      # convenient for later grouping and statistics. By default the
      # custom fields are placed under a "fields" sub-dictionary of the
      # event, as in this example.
      #fields:
      #  level: debug
      #  review: 1

      # If set to true, store the custom fields at the top level of the
      # event instead of under "fields". Custom fields then override the
      # filebeat default fields on a name clash.
      #fields_under_root: false

      # Ignore files whose content was last modified before the given
      # time span, e.g. 2h (2 hours) or 5m (5 minutes).
      #ignore_older: 0

      # Close the file handle of a monitored file that has not been
      # updated within this period; changes are then only picked up again
      # on the next scan.
      #close_older: 1h

      # Value of the "type" field of the document in the Elasticsearch
      # output; can also be used to classify logs. Default: log
      #document_type: log

      # How often filebeat checks the prospector paths for updates such
      # as new files. Setting 0s makes filebeat scan as fast as possible
      # at the cost of higher CPU usage. Default: 10s.
      #scan_frequency: 10s

      # Buffer size (in bytes) used by each harvester when reading a file.
      #harvester_buffer_size: 16384

      # Maximum number of bytes a single log event may carry; any extra
      # bytes are discarded. Default: 10MB.
      #max_bytes: 10485760

      # Multiline handling, for logs where one event spans several lines,
      # e.g. stack traces of error messages in various languages. It
      # contains the following settings:
      #multiline:

        # The regexp pattern that has to be matched. The example pattern
        # matches all lines starting with [
        #pattern: ^\[

        # Defines if the pattern set under pattern should be negated or
        # not. Default is false.
        #negate: false

        # Match can be set to "after" or "before". It is used to define if
        # lines should be appended to a pattern that was (not) matched
        # before or after, or as long as a pattern is not matched based on
        # negate. Note: "after" is the equivalent of "previous" and
        # "before" is the equivalent of "next" in Logstash.
        #match: after

        # The maximum number of lines that are combined into one event.
        # In case there are more than max_lines, the additional lines are
        # discarded. Default is 500.
        #max_lines: 500

        # After the defined timeout, a multiline event is sent even if no
        # new pattern was found to start a new event. Default is 5s.
        #timeout: 5s

      # If set to true, filebeat starts reading new files at their end
      # and sends each new line as an event, instead of resending the
      # whole content from the beginning of the file.
      #tail_files: false

      # Once filebeat detects that a file has reached EOF, wait this long
      # before checking the file for updates again. Default: 1s.
      #backoff: 1s

      # Maximum wait time before re-checking a file after EOF has been
      # reached. Default: 10s.
      #max_backoff: 10s

      # Factor by which the backoff wait time grows. Default: 2. Once
      # max_backoff is reached, filebeat keeps waiting max_backoff until
      # the file is updated, after which backoff is reset.
      #backoff_factor: 2

      # Close a file as soon as its name changes. This option is
      # recommended on Windows only.
      #force_close_files: false

    # Additional prospector
    #-
      # Configuration to use stdin input
      #input_type: stdin

  # Spooler size: when the number of events in the spooler exceeds this
  # threshold, it is flushed and sent regardless of idle_timeout.
  #spool_size: 2048

  # Whether to use asynchronous publishing mode (experimental!).
  #publish_async: false

  # Spooler timeout: after this idle time the spooler is flushed even if
  # spool_size has not been reached.
  #idle_timeout: 5s

  # File in which filebeat records how far each log file has been
  # processed.
  registry_file: /var/lib/filebeat/registry

  # Full path of additional configuration files to include from other
  # locations; only their prospector sections are used.
  #config_dir:
############################# Output ##########################################
# Output configuration. A single instance can ship either to elasticsearch
# or to logstash: enable one set of output settings and comment out the
# other.
output:

  ### Elasticsearch as output.
  elasticsearch:
    # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
    hosts: ["localhost:9200"]

    # Output authentication.
    #protocol: "https"
    #username: "admin"
    #password: "s3cr3t"

    # Number of worker processes per configured host.
    #worker: 1

    # Index to write events to. Default: "filebeat" (a date suffix is
    # appended, producing filebeat-<date> indices).
    #index: "filebeat"

    # Template settings used to control the Elasticsearch mappings. By
    # default template loading is disabled and no template is loaded;
    # adjust or override these settings to load your own template.
    #template:

      # Template name. Default is filebeat.
      #name: "filebeat"

      # Path to the template file.
      #path: ""

      # Overwrite an existing template.
      #overwrite: false

    # Optional HTTP path.
    #path: "/elasticsearch"

    # Proxy server URL.
    #proxy_url: http://proxy:3128

    # Number of send retries. Default: 3.
    #max_retries: 3

    # Maximum number of events in a single Elasticsearch bulk API index
    # request. Default: 50.
    #bulk_max_size: 50

    # Elasticsearch request timeout in seconds. Default: 90.
    #timeout: 90

    # Number of seconds to wait for new events between two bulk API index
    # requests. If bulk_max_size is reached before this value, an
    # additional bulk index request is made. Default: 1.
    #flush_interval: 1

    # Maintain the topology in Elasticsearch. Default: false. This option
    # is only supported by Packetbeat.
    #save_topology: false

    # Expiry time (seconds) of the topology information kept in
    # Elasticsearch. Default: 15.
    #topology_expire: 15

    # TLS options for https-based connections, such as the certificate
    # authorities to trust. If tls is absent, the host's CAs are used for
    # the https connection to Elasticsearch.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/"

      # Controls whether the client verifies server certificates and host
      # name. If insecure is set to true, all server host names and
      # certificates will be accepted. In this mode TLS based connections
      # are susceptible to man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []

      # Configure minimum TLS version allowed for the connection
      #min_version: 1.0

      # Configure maximum TLS version allowed for the connection
      #max_version: 1.2

  ### Logstash as output. A single instance can ship either to
  ### elasticsearch or to logstash: enable one and comment out the other.
  #logstash:
    # Logstash host addresses.
    #hosts: ["localhost:5044"]

    # Number of workers per host publishing events. Best enabled together
    # with load balancing.
    #worker: 1

    # Compression level for the data sent.
    #compression_level: 3

    # If set to true and multiple logstash hosts are configured, the
    # output plugin load-balances published events onto all logstash
    # hosts. If set to false, it sends all events to only one (random)
    # host and switches to another one if the selected host becomes
    # unreachable. Default: false.
    #loadbalance: true

    # Index to write events to. Default: filebeat.
    #index: filebeat

    # Optional TLS; off by default. TLS options for the connection, such
    # as the certificate authorities to trust. If tls is absent, the
    # host's CAs are used.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/"

      # Controls whether the client verifies server certificates and host
      # name. If insecure is set to true, all server host names and
      # certificates will be accepted. In this mode TLS based connections
      # are susceptible to man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []

  ### File output: writes each transaction to a file in JSON format.
  ### Mainly used for testing; can also be used as logstash input.
  #file:
    # Directory in which to save the files.
    #path: "/tmp/filebeat"

    # Base file name. Defaults to the beat name; with rotation this
    # generates files such as filebeat, filebeat.1, filebeat.2, ...
    #filename: filebeat

    # Maximum size (kB) of each file; when the size is reached, the file
    # is rotated. Default: 1000 kB.
    #rotate_every_kb: 10000

    # Maximum number of files to keep. When this number is reached, the
    # oldest file is deleted. Default: 7.
    #number_of_files: 7

  ### Console output: writes events to standard output in JSON format.
  #console:
    # If set to true, the events written to stdout are pretty-printed.
    # Default: false.
    #pretty: false
############################# Shipper #########################################
shipper:
  # Name of the shipper. If unset, the hostname is used. This name is
  # included in the "shipper" field of every published transaction, so
  # all transactions sent by a single beat can be grouped by it.
  #name:

  # Tags of the beat, included in the "tags" field of every published
  # transaction. Tags make it easy to group servers by different logical
  # properties; for example, tag every beat of a web cluster with
  # "webservers" and then filter the whole group by that tag in the
  # Kibana visualisation interface.
  #tags: ["service-X", "web-tier"]

  # If the ignore_outgoing option is enabled, the beat ignores all
  # transactions originating from the server the beat itself runs on.
  #ignore_outgoing: true

  # Refresh interval of the topology map, i.e. how often (seconds) each
  # beat publishes its IP addresses to the topology map. Default: 10.
  #refresh_topology_freq: 10

  # Expiry time (seconds) of the topology information. Useful when a beat
  # stops publishing its IP addresses: once expired, the addresses are
  # automatically removed from the topology map. Default: 15.
  #topology_expire: 15

  # Internal queue size for single events in the processing pipeline.
  #queue_size: 1000

  # Search paths for the GeoIP database. When the beat finds and loads a
  # GeoIP database, it adds the GeoIP location of each transaction's
  # client to the output. Currently only Packetbeat uses this option.
  #geoip:
    #paths:
    #  - "/usr/share/GeoIP/"
    #  - "/usr/local/var/GeoIP/"
############################# Logging #########################################
# Configure the beat's own logging. Logs can be written to syslog or to
# rotating log files. Default: syslog.
logging:
  # If enabled, send all logging output to syslog.
  #to_syslog: true

  # If enabled, write all logging output to rotating files.
  #to_files: false

  # Options for the rotating-file logging output.
  files:
    # Directory of the log files.
    #path: /var/log/mybeat

    # Name of the log files.
    #name: mybeat

    # Maximum size of a log file before it is rotated.
    # Default: 10485760 (10 MB).
    rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Default: 7. Valid values
    # range from 2 to 1024.
    #keepfiles: 7

  # Enable debug output for selected components. To enable all selectors
  # use ["*"]. Other available selectors are "beat", "publish" and
  # "service". Multiple selectors can be chained.
  #selectors: [ ]

  # Log level: debug, info, warning, error or critical. If "debug" is
  # used but no selectors are configured, ["*"] is assumed.
  # Default: error.
  #level: error

# Reference document:
# /elk/elk-beats-common-configure-section-describe/