EFK step by step install

单机环境本地安装:

elasticsearch 6.8.6

fluentd 1.16.1

kibana 6.8.6


--base setup

$ ulimit -n

65535


$ more /etc/security/limits.conf 

root soft nofile 65536

root hard nofile 65536

* soft nofile 65536

* hard nofile 65536


$ more /etc/sysctl.conf

net.core.somaxconn = 1024

net.core.netdev_max_backlog = 5000

net.core.rmem_max = 16777216

net.core.wmem_max = 16777216

net.ipv4.tcp_wmem = 4096 12582912 16777216

net.ipv4.tcp_rmem = 4096 12582912 16777216

net.ipv4.tcp_max_syn_backlog = 8096

net.ipv4.tcp_slow_start_after_idle = 0

net.ipv4.tcp_tw_reuse = 1

net.ipv4.ip_local_port_range = 10240 65535

# If forward uses port 24224, reserve that port number for use as an ephemeral port.

# If another port, e.g., monitor_agent uses port 24220, add a comma-separated list of port numbers.

# net.ipv4.ip_local_reserved_ports = 24220,24224

net.ipv4.ip_local_reserved_ports = 24224



$ more /etc/sysctl.d/10-link-restrictions.conf   (or /usr/lib/sysctl.d/50-default.conf)

fs.protected_hardlinks = 1

fs.protected_symlinks = 1



--elasticsearch install 

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.6.rpm

yum localinstall elasticsearch-6.8.6.rpm -y


cd /etc/elasticsearch/

cp elasticsearch.yml elasticsearch.yml.bak

vi elasticsearch.yml

[root@almalinux ~]# more /etc/elasticsearch/elasticsearch.yml

# ======================== Elasticsearch Configuration =========================

#

# NOTE: Elasticsearch comes with reasonable defaults for most settings.

#       Before you set out to tweak and tune the configuration, make sure you

#       understand what are you trying to accomplish and the consequences.

#

# The primary way of configuring a node is via this file. This template lists

# the most important settings you may want to configure for a production cluster.

#

# Please consult the documentation for further information on configuration options:

# https://www.elastic.co/guide/en/elasticsearch/reference/index.html

#

# ---------------------------------- Cluster -----------------------------------

#

# Use a descriptive name for your cluster:

#

cluster.name: ELK-Cluster 

#

# ------------------------------------ Node ------------------------------------

#

# Use a descriptive name for the node:

#

node.name: node-1

#

# Add custom attributes to the node:

#

#node.attr.rack: r1

#

# ----------------------------------- Paths ------------------------------------

#

# Path to directory where to store the data (separate multiple locations by comma):

#

path.data: /var/lib/elasticsearch

#

# Path to log files:

#

path.logs: /var/log/elasticsearch

#

# ----------------------------------- Memory -----------------------------------

#

# Lock the memory on startup:

#

bootstrap.memory_lock: true

#

# Make sure that the heap size is set to about half the memory available

# on the system and that the owner of the process is allowed to use this

# limit.

#

# Elasticsearch performs poorly when the system is swapping the memory.

#

# ---------------------------------- Network -----------------------------------

#

# Set the bind address to a specific IP (IPv4 or IPv6):

#

network.host: 192.168.100.126

#

# Set a custom port for HTTP:

#

http.port: 9200

#

# For more information, consult the network module documentation.

#

# --------------------------------- Discovery ----------------------------------

#

# Pass an initial list of hosts to perform discovery when new node is started:

# The default list of hosts is ["127.0.0.1", "[::1]"]

#

discovery.zen.ping.unicast.hosts: ["192.168.100.126"]

#

# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):

#

#discovery.zen.minimum_master_nodes: 

#

# For more information, consult the zen discovery module documentation.

#

# ---------------------------------- Gateway -----------------------------------

#

# Block initial recovery after a full cluster restart until N nodes are started:

#

#gateway.recover_after_nodes: 3

#

# For more information, consult the gateway module documentation.

#

# ---------------------------------- Various -----------------------------------

#

# Require explicit names when deleting indices:

#

#action.destructive_requires_name: true

systemctl start elasticsearch.service 

systemctl status elasticsearch.service 


curl http://192.168.100.126:9200/_cluster/health?pretty=true

{

  "cluster_name" : "ELK-Cluster",

  "status" : "yellow",

  "timed_out" : false,

  "number_of_nodes" : 1,

  "number_of_data_nodes" : 1,

  "active_primary_shards" : 17,

  "active_shards" : 17,

  "relocating_shards" : 0,

  "initializing_shards" : 0,

  "unassigned_shards" : 15,

  "delayed_unassigned_shards" : 0,

  "number_of_pending_tasks" : 0,

  "number_of_in_flight_fetch" : 0,

  "task_max_waiting_in_queue_millis" : 0,

  "active_shards_percent_as_number" : 53.125

}


vi /usr/lib/systemd/system/elasticsearch.service

LimitMEMLOCK=infinity   --add this line to the [Service] section to lock memory (required for bootstrap.memory_lock: true to take effect)


--访问elasticsearch运行状态

http://192.168.100.126:9200/


--elasticsearch-head

关联太多,没有安装


--fluentd install

--Red Hat / CentOS

--Download and execute the install script with curl:

# td-agent 4

$ curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent4.sh | sh


# td-agent 3

$ curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh


# /usr/sbin/td-agent-gem install fluent-plugin-elasticsearch

# /usr/sbin/td-agent-gem install fluent-plugin-typecast

# /usr/sbin/td-agent-gem install fluent-plugin-secure-forward


--安装 fluent-plugin-elasticsearch 时需要 ruby 3.0 以上版本;此处先编译安装 2.6 小版本;安装 UI 时需 2.7 以上

--ruby 2.6 需编译安装

[root@almalinux ruby-2.6.0]# echo "PATH=$PATH:/usr/local/bin;export PATH" >> /etc/profile    # PATH entries must be directories; source-built ruby installs to /usr/local/bin by default

[root@almalinux ruby-2.6.0]# ruby -v

ruby 2.6.0p0 (2018-12-25 revision 66547) [x86_64-linux]


--选择安装fluentd-ui

$ gem install -V fluentd-ui

$ fluentd-ui start

--default account credentials are:

username="admin"

password="changeme"


[root@almalinux ~]# cd /etc/td-agent/

[root@almalinux td-agent]# cp td-agent.conf td-agent.conf_bak


[root@almalinux td-agent]# more td-agent.conf

####

## Output descriptions:

##


# Treasure Data (http://www.treasure-data.com/) provides cloud based data

# analytics platform, which easily stores and processes data from td-agent.

# FREE plan is also provided.

# @see http://docs.fluentd.org/articles/http-to-td

#

# This section matches events whose tag is td.DATABASE.TABLE

  @type tdlog

  @id output_td

  apikey YOUR_API_KEY


  auto_create_table

 

    @type file

    path /var/log/td-agent/buffer/td

 


 

    @type file

    path /var/log/td-agent/failed_records

 


## match tag=debug.** and dump to console

  @type stdout

  @id output_stdout


####

## Source descriptions:

##


## built-in TCP input

## @see http://docs.fluentd.org/articles/in_forward

  @type forward

  @id input_forward


## built-in UNIX socket input

#

#  type unix

#


# HTTP input

# POST http://localhost:8888/?json=

# POST http://localhost:8888/td.myapp.login?json={"user"%3A"me"}

# @see http://docs.fluentd.org/articles/in_http

  @type http

  @id input_http

  port 8888


## live debugging agent

  @type debug_agent

  @id input_debug_agent

  bind 127.0.0.1

  port 24230


####

## Examples:

##


## File input

## read apache logs continuously and tags td.apache.access

#

#  @type tail

#  @id input_tail

#    @type apache2

#  path /var/log/httpd-access.log

#  tag td.apache.access

#


## File output

## match tag=local.** and write to file

#

#  @type file

#  @id output_file

#  path /var/log/td-agent/access

#


## Forwarding

## match tag=system.** and forward to another td-agent server

#

#  @type forward

#  @id output_system_forward

#

#    host 192.168.0.11

#  # secondary host is optional

#   

#      host 192.168.0.12

#   

#


## Multiple output

## match tag=td.*.* and output to Treasure Data AND file

#

#  @type copy

#  @id output_copy

#    @type tdlog

#    apikey API_KEY

#    auto_create_table

#   

#      @type file

#      path /var/log/td-agent/buffer/td

#   

#    @type file

#    path /var/log/td-agent/td-%Y-%m-%d/%H.log

#


@type tail

path /var/log/messages

pos_file /var/log/td-agent/messages.log.pos

tag message

@type json


@type stdout


@type copy

@type elasticsearch

host 192.168.100.126

port 9200

logstash_format true

logstash_prefix message-${tag}

logstash_dateformat %Y%m%d

include_tag_key true

type_name access_log

tag_key @log_name

flush_interval 1s

@type stdout

[root@almalinux td-agent]# systemctl start td-agent.service  


[root@almalinux ~]# curl  '192.168.100.126:9200/_cat/indices?v'

health status index                         uuid                   pri rep docs.count docs.deleted store.size pri.store.size

yellow open   message-fluent.warn-20230607  kCApM6hRSaiXjS5uduQZ-Q   5   1       4912            0      1.9mb          1.9mb

yellow open   message-fluent.error-20230607 V6W3_kZuRLaREnVtM_EpEw   5   1         45            0      141kb          141kb

yellow open   message-fluent.info-20230607  OYrYqc7hTX234M6grsfPRw   5   1         16            0     62.2kb         62.2kb

green  open   .kibana_task_manager          D9VEYYYMQamONgJeUM7AtQ   1   0          2            0     12.5kb         12.5kb

green  open   .kibana_1                     CSuTMPokSNuEg8zYP2q9TA   1   0          7            0     31.5kb         31.5kb


curl  'localhost:9200/_cat/indices?v'

#结果为red,主分片数据缺失,搜索只能返回部分数据,分配到这个分片上的写入请求会返回一个异常

#结果为yellow,主分片数据正常,副本分片数据有缺失,如果更多的分片消失,会丢数据

#结果为green,主分片和副本分片数据都正常。


--kibana install

# wget https://artifacts.elastic.co/downloads/kibana/kibana-6.8.6-x86_64.rpm

# yum localinstall kibana-6.8.6-x86_64.rpm -y


[root@almalinux ~]# vi /etc/kibana/kibana.yml  --重点几个参数配置

server.port: 5601

server.host: "192.168.100.126"

elasticsearch.hosts: ["http://192.168.100.126:9200"]

i18n.locale: "zh-CN"


[root@almalinux ~]# more /etc/kibana/kibana.yml

# Kibana is served by a back end server. This setting specifies the port to use.

server.port: 5601


# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.

# The default is 'localhost', which usually means remote machines will not be able to connect.

# To allow connections from remote users, set this parameter to a non-loopback address.

server.host: "192.168.100.126"


# Enables you to specify a path to mount Kibana at if you are running behind a proxy.

# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath

# from requests it receives, and to prevent a deprecation warning at startup.

# This setting cannot end in a slash.

#server.basePath: ""


# Specifies whether Kibana should rewrite requests that are prefixed with

# `server.basePath` or require that they are rewritten by your reverse proxy.

# This setting was effectively always `false` before Kibana 6.3 and will

# default to `true` starting in Kibana 7.0.

#server.rewriteBasePath: false


# The maximum payload size in bytes for incoming server requests.

#server.maxPayloadBytes: 1048576


# The Kibana server's name.  This is used for display purposes.

#server.name: "your-hostname"


# The URLs of the Elasticsearch instances to use for all your queries.

elasticsearch.hosts: ["http://192.168.100.126:9200"]


# When this setting's value is true Kibana uses the hostname specified in the server.host

# setting. When the value of this setting is false, Kibana uses the hostname of the host

# that connects to this Kibana instance.

#elasticsearch.preserveHost: true


# Kibana uses an index in Elasticsearch to store saved searches, visualizations and

# dashboards. Kibana creates a new index if the index doesn't already exist.

#kibana.index: ".kibana"


# The default application to load.

#kibana.defaultAppId: "home"


# If your Elasticsearch is protected with basic authentication, these settings provide

# the username and password that the Kibana server uses to perform maintenance on the Kibana

# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which

# is proxied through the Kibana server.

#elasticsearch.username: "user"

#elasticsearch.password: "pass"


# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.

# These settings enable SSL for outgoing requests from the Kibana server to the browser.

#server.ssl.enabled: false

#server.ssl.certificate: /path/to/your/server.crt

#server.ssl.key: /path/to/your/server.key


# Optional settings that provide the paths to the PEM-format SSL certificate and key files.

# These files validate that your Elasticsearch backend uses the same key files.

#elasticsearch.ssl.certificate: /path/to/your/client.crt

#elasticsearch.ssl.key: /path/to/your/client.key


# Optional setting that enables you to specify a path to the PEM file for the certificate

# authority for your Elasticsearch instance.

#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]


# To disregard the validity of SSL certificates, change this setting's value to 'none'.

#elasticsearch.ssl.verificationMode: full


# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of

# the elasticsearch.requestTimeout setting.

#elasticsearch.pingTimeout: 1500


# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value

# must be a positive integer.

#elasticsearch.requestTimeout: 30000


# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side

# headers, set this value to [] (an empty list).

#elasticsearch.requestHeadersWhitelist: [ authorization ]


# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten

# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.

#elasticsearch.customHeaders: {}


# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.

#elasticsearch.shardTimeout: 30000


# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.

#elasticsearch.startupTimeout: 5000


# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.

#elasticsearch.logQueries: false


# Specifies the path where Kibana creates the process ID file.

#pid.file: /var/run/kibana.pid


# Enables you specify a file where Kibana stores log output.

#logging.dest: stdout


# Set the value of this setting to true to suppress all logging output.

#logging.silent: false


# Set the value of this setting to true to suppress all logging output other than error messages.

#logging.quiet: false


# Set the value of this setting to true to log all events, including system usage information

# and all requests.

#logging.verbose: false


# Set the interval in milliseconds to sample system and process performance

# metrics. Minimum is 100ms. Defaults to 5000.

#ops.interval: 5000


# Specifies locale to be used for all localizable strings, dates and number formats.

i18n.locale: "zh-CN"


[root@almalinux kibana]# systemctl start kibana.service 


[root@almalinux ~]# netstat -nltp                    

Active Internet connections (only servers)

Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    

tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      820/sshd            

tcp        0      0 0.0.0.0:8888            0.0.0.0:*               LISTEN      2405/ruby           

tcp        0      0 0.0.0.0:24224           0.0.0.0:*               LISTEN      2410/ruby           

tcp        0      0 0.0.0.0:5601            0.0.0.0:*               LISTEN      2895/node           

tcp        0      0 127.0.0.1:24230         0.0.0.0:*               LISTEN      2410/ruby           

tcp6       0      0 :::22                   :::*                    LISTEN      820/sshd            

tcp6       0      0 :::9200                 :::*                    LISTEN      2653/java           

tcp6       0      0 :::9300                 :::*                    LISTEN      2653/java 


--访问kibana

http://192.168.100.126:5601/


请使用浏览器的分享功能分享到微信等