Percona XtraDB Cluster 部署
配置本地光盘YUM源:
cd /etc/yum.repos.d/
vi CentOS-Media.repo
[c7-media]
name=CentOS-$releasever - Media
baseurl=file:///media/CentOS/
file:///media/cdrom/
file:///media/cdrecorder/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
挂载光盘
mount /dev/sr0 /media/CentOS
添加 MySQL 用户
adduser mysql
tar vxf Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101.tar.gz
mv Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101 /usr/local
tar vxf percona-xtrabackup-2.4.7-Linux-x86_64.tar.gz
mv percona-xtrabackup-2.4.7-Linux-x86_64 /usr/local/
cd /usr/local/
chown mysql.mysql -R percona-xtrabackup-2.4.7-Linux-x86_64
vi /etc/profile
export PATH=/usr/local/percona-xtrabackup-2.4.7-Linux-x86_64/bin:$PATH
export PATH=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/bin:$PATH
source /etc/profile
systemctl stop firewalld
systemctl disable firewalld.service
yum -y install openssl*
yum -y install socat
yum -y remove mariadb-libs-5.5.44-2.el7.centos.x86_64
vi /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
#SELINUX=enforcing
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
setenforce 0
getenforce
cd /usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101
mkdir data
chown -R mysql.mysql /usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101
#chown -R mysql.mysql data
bin/mysqld --initialize --basedir=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101 --datadir=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/data --user=mysql
上面的操作需要在3个节点都执行
=============================================================
节点1:
vi /etc/my.cnf
[mysqld]
# Remove leading # and set to the amount of RAM for the most important data
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
# innodb_buffer_pool_size = 128M
# Remove leading # to turn on a very important data integrity option: logging
# changes to the binary log between backups.
# log_bin
# These are commonly set, remove the # and set as required.
# basedir = .....
# datadir = .....
# port = .....
# server_id = .....
# socket = .....
basedir =/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101
datadir = /usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/data
socket =/tmp/mysql.sock
wsrep_provider=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/lib/libgalera_smm.so
wsrep_cluster_name=pxc-cluster
wsrep_cluster_address=gcomm://192.168.222.230,192.168.222.157,192.168.222.158
wsrep_node_name=pxc1
wsrep_node_address=192.168.222.230
wsrep_sst_method=xtrabackup-v2
wsrep_sst_auth=sstuser:passw0rd
pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=10G
wsrep_causal_reads=ON
log_bin=mysql-bin
server_id=13
relay_log=mysql-relay-bin
innodb_log_file_size=2G
innodb_log_buffer_size=64M
innodb_io_capacity=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
innodb_buffer_pool_instances=12
max_tmp_tables=100
query_cache_size=0
query_cache_type=0
max_connections=1000
max_prepared_stmt_count=1048576
innodb_support_xa =0
innodb_flush_method=O_DIRECT
innodb_open_files=4000
thread_cache_size=100
# Remove leading # to set options mainly useful for reporting servers.
# The server defaults are faster for transactions and fast SELECTs.
# Adjust sizes as needed, experiment to find the optimal values.
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M
# thread pool
thread_handling = pool-of-threads
log_bin_trust_function_creators=1
log_timestamps=SYSTEM
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
chown mysql.mysql /etc/my.cnf
bin/mysqld_safe --defaults-file=/etc/my.cnf --wsrep-new-cluster --user=mysql &
mysql>
alter user 'root'@'localhost' identified by '123456';
FLUSH PRIVILEGES;
CREATE USER 'sstuser'@'localhost' IDENTIFIED BY 'passw0rd';
GRANT RELOAD, LOCK TABLES, PROCESS, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost';
FLUSH PRIVILEGES;
节点2:
vi /etc/my.cnf
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html
# *** DO NOT EDIT THIS FILE. It's a template which will be copied to the
# *** default location during install, and will be replaced if you
# *** upgrade to a newer version of MySQL.
[mysqld]
# Remove leading # and set to the amount of RAM for the most important data
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
# innodb_buffer_pool_size = 128M
# Remove leading # to turn on a very important data integrity option: logging
# changes to the binary log between backups.
# log_bin
# These are commonly set, remove the # and set as required.
# basedir = .....
# datadir = .....
# port = .....
# server_id = .....
# socket = .....
basedir =/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101
datadir = /usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/data
socket =/tmp/mysql.sock
wsrep_provider=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/lib/libgalera_smm.so
wsrep_cluster_name=pxc-cluster
wsrep_cluster_address=gcomm://192.168.222.230,192.168.222.157,192.168.222.158
wsrep_node_name=pxc2
wsrep_node_address=192.168.222.157
wsrep_sst_method=xtrabackup-v2
wsrep_sst_auth=sstuser:passw0rd
pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=10G
wsrep_causal_reads=ON
log_bin=mysql-bin
server_id=14
relay_log=mysql-relay-bin
innodb_log_file_size=2G
innodb_log_buffer_size=64M
innodb_io_capacity=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
innodb_buffer_pool_instances=12
max_tmp_tables=100
query_cache_size=0
query_cache_type=0
max_connections=1000
max_prepared_stmt_count=1048576
innodb_support_xa =0
innodb_flush_method=O_DIRECT
innodb_open_files=4000
thread_cache_size=100
# Remove leading # to set options mainly useful for reporting servers.
# The server defaults are faster for transactions and fast SELECTs.
# Adjust sizes as needed, experiment to find the optimal values.
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M
# thread pool
thread_handling = pool-of-threads
log_bin_trust_function_creators=1
log_timestamps=SYSTEM
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
chown mysql.mysql /etc/my.cnf
bin/mysqld_safe --defaults-file=/etc/my.cnf --user=mysql &
节点3:
vi /etc/my.cnf
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html
# *** DO NOT EDIT THIS FILE. It's a template which will be copied to the
# *** default location during install, and will be replaced if you
# *** upgrade to a newer version of MySQL.
[mysqld]
# Remove leading # and set to the amount of RAM for the most important data
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
# innodb_buffer_pool_size = 128M
# Remove leading # to turn on a very important data integrity option: logging
# changes to the binary log between backups.
# log_bin
# These are commonly set, remove the # and set as required.
# basedir = .....
# datadir = .....
# port = .....
# server_id = .....
# socket = .....
basedir =/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101
datadir = /usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/data
socket =/tmp/mysql.sock
wsrep_provider=/usr/local/Percona-XtraDB-Cluster-5.7.17-rel13-29.20.3.Linux.x86_64.ssl101/lib/libgalera_smm.so
wsrep_cluster_name=pxc-cluster
wsrep_cluster_address=gcomm://192.168.222.230,192.168.222.157,192.168.222.158
wsrep_node_name=pxc3
wsrep_node_address=192.168.222.158
wsrep_sst_method=xtrabackup-v2
wsrep_sst_auth=sstuser:passw0rd
pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=10G
wsrep_causal_reads=ON
log_bin=mysql-bin
server_id=15
relay_log=mysql-relay-bin
innodb_log_file_size=2G
innodb_log_buffer_size=64M
innodb_io_capacity=1000
innodb_read_io_threads=8
innodb_write_io_threads=8
innodb_buffer_pool_instances=12
max_tmp_tables=100
query_cache_size=0
query_cache_type=0
max_connections=1000
max_prepared_stmt_count=1048576
innodb_support_xa =0
innodb_flush_method=O_DIRECT
innodb_open_files=4000
thread_cache_size=100
# Remove leading # to set options mainly useful for reporting servers.
# The server defaults are faster for transactions and fast SELECTs.
# Adjust sizes as needed, experiment to find the optimal values.
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M
# thread pool
thread_handling = pool-of-threads
log_bin_trust_function_creators=1
log_timestamps=SYSTEM
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
chown mysql.mysql /etc/my.cnf
bin/mysqld_safe --defaults-file=/etc/my.cnf --user=mysql &
===========================================================================
整个集群中3个MySQL实例都关闭后,就要查看各实例的 grastate.dat 文件, 在 safe_to_bootstrap: 1
的节点上使用 bin/mysqld_safe --defaults-file=/etc/my.cnf --wsrep-new-cluster & 启动第一个实例,
其他两个实例使用 bin/mysqld_safe --defaults-file=/etc/my.cnf & 启动。
[root@localhost data]# cat grastate.dat
# GALERA saved state
version: 2.1
uuid: 90537b00-2ea9-11e7-a7f4-2bcbc504d8d9
seqno: 13
safe_to_bootstrap: 1
bin/mysqld_safe --defaults-file=/etc/my.cnf --wsrep-new-cluster &
或者使用下面的方法启动:
/etc/init.d/mysql bootstrap-pxc #启动第一个节点
/etc/init.d/mysql start #启动其他节点
在红帽的LINUX 上可能需要这样启动:
systemctl start mysql@bootstrap.service # 启动第一个节点
proxysql 作为 PXC 集群的代理实现读写分离
一、安装
yum -y install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
yum -y install Percona-XtraDB-Cluster-client-57
yum -y install proxysql
service proxysql start (或者: /etc/init.d/proxysql start)
二、 MySQL(PXC)添加 proxysql 需要用到的用户
1、添加监控用户
mysql>
CREATE USER 'monitor'@'192.168.1.%' IDENTIFIED BY 'monitor';
GRANT USAGE ON *.* TO 'monitor'@'192.168.1.%';
2、添加客户端用户
CREATE USER 'test'@'192.168.1.%' IDENTIFIED BY 'test';
GRANT all ON sbtest.* TO 'test'@'192.168.1.%';
三、配置
1、全局变量配置
mysql> \R Admin>
Admin>
-- 配置提供MySQL代理的线程数
update global_variables set variable_value=32 where variable_name='mysql-threads';
-- 配置返回信息给客户端的MySQL服务器版本,不论实际MySQL服务器是什么版本都用这里配置的版本来应答客户端
update global_variables set variable_value='5.7.17' where variable_name='mysql-server_version';
--修改 proxysql 管理用户名和密码
update global_variables set variable_value='admin:passw0rd' where variable_name='admin-admin_credentials';
--修改 proxysql 统计(stats)用户名和密码
update global_variables set variable_value='stats:stats' where variable_name='admin-stats_credentials';
2、配置服务器
mysql -u admin -padmin -h 127.0.0.1 -P6032
mysql> \R Admin>
Admin>
INSERT INTO mysql_servers(hostgroup_id,hostname,port,weight) VALUES (1,'192.168.1.77',3306,1000000);
INSERT INTO mysql_servers(hostgroup_id,hostname,port,weight) VALUES (2,'192.168.1.77',3306,1);
INSERT INTO mysql_servers(hostgroup_id,hostname,port,weight) VALUES (2,'192.168.1.77',3307,1000000);
INSERT INTO mysql_servers(hostgroup_id,hostname,port,weight) VALUES (2,'192.168.1.77',3308,1000000);
INSERT INTO mysql_servers(hostgroup_id,hostname,port,weight) VALUES (1,'192.168.1.77',3307,1);
监听 3306 端口的是主库,主库同时加入了服务器组1和服务器组2,在组1的权重是100万,在服务器2的权重是1.
监听3307服务器是备库,同时加入了服务器组1和服务器组2,在组2的权重是100万,在服务器组1的权重是1.
监听3308服务器是备库,加入了服务器组2,在服务器组2的权重是100万.
通过配置路由把select 语句路由到组2(备库),主库同时加入了组1和组2,当备库正常时在备库查询100万次才会到主库上执行一次SELECT。
监听3307服务器加入服务器组1的目的,是当主库故障时可以把写的SQL路由到该备库。如果主库正常的情况下,在主库执行100万次写的SQL才会在这台备库
执行一次写的SQL。
3、配置监控
Admin> UPDATE global_variables SET variable_value='monitor' WHERE variable_name='mysql-monitor_username';
Admin> UPDATE global_variables SET variable_value='monitor' WHERE variable_name='mysql-monitor_password';
Admin> UPDATE global_variables SET variable_value='2000' WHERE variable_name IN ('mysql-monitor_connect_interval','mysql-monitor_ping_interval','mysql-monitor_read_only_interval');
4、配置用户
INSERT INTO mysql_users(username,password,default_hostgroup,transaction_persistent) VALUES ('test','test',1,1);
transaction_persistent 设置为1,确保同一个事务的SQL路由到同一个MySQL实例,确保开启事务的SQL路由到主库。
5、配置路由
Admin> INSERT INTO mysql_query_rules (rule_id,active,username,match_digest,destination_hostgroup,apply) VALUES (1,1,'test','^SELECT',2,0);
6、Adding Galera Support
Admin>
INSERT INTO scheduler(id,interval_ms,filename,arg1,arg2,arg3,arg4)
VALUES
(1,'10000','/usr/bin/proxysql_galera_checker','127.0.0.1','6032','0',
'/var/lib/proxysql/proxysql_galera_checker.log');
Admin> LOAD SCHEDULER TO RUNTIME;
Admin>SELECT * FROM runtime_scheduler\G
*************************** 1. row ***************************
id: 1
interval_ms: 10000
filename: /usr/bin/proxysql_galera_checker
arg1: 127.0.0.1
arg2: 6032
arg3: 0
arg4: /var/lib/proxysql/proxysql_galera_checker.log
arg5: NULL
1 row in set (0.00 sec)
7、应用配置和把配置保存到磁盘
Admin>
LOAD MYSQL USERS TO RUNTIME;
LOAD MYSQL SERVERS TO RUNTIME;
LOAD MYSQL QUERY RULES TO RUNTIME;
LOAD MYSQL VARIABLES TO RUNTIME;
LOAD ADMIN VARIABLES TO RUNTIME;
Admin>
SAVE ADMIN VARIABLES TO DISK ;
SAVE MYSQL VARIABLES TO DISK ;
SAVE MYSQL QUERY RULES TO DISK;
SAVE MYSQL SERVERS TO DISK ;
SAVE MYSQL USERS TO DISK ;
8、使用 sysbench 测试
--OLTP 只写测试
/usr/local/sysbench/bin/sysbench --time=600 --threads=30 --mysql-user=test --mysql-password=test /usr/local/sysbench/share/sysbench/oltp_write_only.lua --mysql-host=127.0.0.1 \
--mysql-port=6033 --db-driver=mysql --tables=30 --table_size=1000 --mysql_storage_engine=innodb --report-interval=10 --db-ps-mode=disable run
--OLTP 只读测试
/usr/local/sysbench/bin/sysbench --time=300 --threads=100 --mysql-user=test --mysql-password=test /usr/local/sysbench/share/sysbench/oltp_read_only.lua \
--mysql-host=127.0.0.1 --mysql-port=6033 --db-driver=mysql --tables=30 --table_size=3000000 \
--mysql_storage_engine=innodb --report-interval=10 --db-ps-mode=disable --skip_trx=on run
-- OLTP 读写测试
/usr/local/sysbench/bin/sysbench --time=600 --threads=30 --mysql-user=test --mysql-password=test /usr/local/sysbench/share/sysbench/oltp_read_write.lua \
--mysql-host=127.0.0.1 --mysql-port=6033 --db-driver=mysql --tables=30 \
--skip_trx=on --table_size=3000000 --mysql_storage_engine=innodb --report-interval=10 --db-ps-mode=disable run
在 proxysql 服务器上部署 keepalived
1、安装 keepalived
tar vxf keepalived-1.2.18.tar.gz
cd keepalived-1.2.18
./configure --prefix=/usr/local/keepalived
make
make install
cp /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/
cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
mkdir /etc/keepalived
cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
2、配置 keepalived
cd /usr/local/keepalived/etc/keepalived/samples
cp keepalived.conf.vrrp.localcheck /etc/keepalived/keepalived.conf
cd /etc/keepalived/
-- 修改节点1 /etc/keepalived/keepalived.conf 文件配置内容如下:
vi keepalived.conf
! Configuration File for keepalived
vrrp_script chk_proxysql {
script "/usr/bin/mysqladmin -utest -ptest ping -h 127.0.0.1 -P 6033 > /dev/null 2>&1"
interval 5
weight -100
}
vrrp_instance VI_1 {
interface eth0
state MASTER
virtual_router_id 51
priority 150
authentication {
auth_type PASS
auth_pass hn@91A
}
virtual_ipaddress {
192.168.1.201/24
}
track_script {
chk_proxysql
}
}
vrrp_instance VI_2 {
interface eth0
state BACKUP
virtual_router_id 52
priority 100
authentication {
auth_type PASS
auth_pass hn@91A
}
virtual_ipaddress {
192.168.1.202/24
}
}
--节点二的 keepalived.conf 配置文件
[root@localhost keepalived]# cat keepalived.conf
! Configuration File for keepalived
vrrp_script chk_proxysql {
script "/usr/bin/mysqladmin -utest -ptest ping -h 127.0.0.1 -P 6033 > /dev/null 2>&1"
interval 5
weight -100
}
vrrp_instance VI_1 {
interface eth0
state BACKUP
virtual_router_id 51
priority 100
authentication {
auth_type PASS
auth_pass hn@91A
}
virtual_ipaddress {
192.168.1.201/24
}
}
vrrp_instance VI_2 {
interface eth0
state MASTER
virtual_router_id 52
priority 150
authentication {
auth_type PASS
auth_pass hn@91A
}
virtual_ipaddress {
192.168.1.202/24
}
track_script {
chk_proxysql
}
}
3、启动 proxysql 和 keepalived
/etc/init.d/proxysql start
/etc/init.d/keepalived start
4、在节点一关闭 proxysql
/etc/init.d/proxysql stop
--检查 /var/log/messages 日志看到已经在节点1删除VIP 1
tail -f /var/log/messages
Jun 22 11:09:29 localhost Keepalived_vrrp[26465]: bogus VRRP packet received on eth0 !!!
Jun 22 11:09:29 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Dropping received VRRP packet...
Jun 22 11:09:40 localhost Keepalived_vrrp[26465]: VRRP_Script(chk_proxysql) failed
Jun 22 11:09:42 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Received higher prio advert
Jun 22 11:09:42 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jun 22 11:09:42 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) removing protocol VIPs.
Jun 22 11:09:42 localhost Keepalived_healthcheckers[26464]: Netlink reflector reports IP 192.168.1.201 removed
-- VIP 1已经不在节点1上
[root@localhost samples]# ip add|grep inet
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
inet 192.168.1.91/24 brd 192.168.1.255 scope global eth0
inet6 fe80::4065:cff:fea4:521b/64 scope link
inet6 fe80::7404:63ff:fe85:c20c/64 scope link
-- VIP 1已经漂移到 节点2
[root@localhost keepalived]# ip add|grep inet
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
inet 192.168.1.92/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.202/24 scope global secondary eth0
inet 192.168.1.201/24 scope global secondary eth0
inet6 fe80::30c8:b0ff:fe80:32b7/64 scope link
5、在节点1 重新启动 proxysql VIP 1 自动漂移到节点1
/etc/init.d/proxysql start
tail -f /var/log/messages
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: bogus VRRP packet received on eth0 !!!
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_2) ignoring received advertisment...
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Entering MASTER STATE
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) setting protocol VIPs.
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.1.201
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: ip address associated with VRID not present in received packet : 192.168.1.201
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: one or more VIP associated with VRID mismatch actual MASTER advert
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: bogus VRRP packet received on eth0 !!!
Jun 22 11:39:18 localhost Keepalived_vrrp[26465]: VRRP_Instance(VI_1) Dropping received VRRP packet...
Jun 22 11:39:18 localhost Keepalived_healthcheckers[26464]: Netlink reflector reports IP 192.168.1.201 added
[root@localhost samples]# ip add|grep inet
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
inet 192.168.1.91/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.201/24 scope global secondary eth0
inet6 fe80::4065:cff:fea4:521b/64 scope link
inet6 fe80::7404:63ff:fe85:c20c/64 scope link