Kill sessions:
SELECT concat('KILL ',id,';') FROM information_schema.processlist WHERE user='ordercenter' INTO OUTFILE '/tmp/kill_id.txt';
source /tmp/kill_id.txt;
mysqladmin -uroot -pYDtg@612 processlist
mysqladmin -uroot -pYDtg@612 processlist |grep Query
Thread states commonly seen in the processlist:
Waiting for table level lock
Sorting result
removing tmp table
Copying to tmp table
query end
show status like 'table_locks%'
SHOW VARIABLES LIKE 'query_cache%';
select table_name,engine from information_schema.tables where engine='MYISAM' and table_schema='orders';
+-----------------------+--------+
| table_name | engine |
+-----------------------+--------+
| erp_fail_log | MyISAM |
| not_discount_order_id | MyISAM |
| send_jia_order | MyISAM |
+-----------------------+--------+
1. Check the code for transactions that are never closed, i.e. a transaction is started but never ended (or the transaction starts, the PHP code hits an exception mid-execution with no exception handling, and the closing statement is never reached, leaving the transaction open; it is best to add exception handling in the code so every transaction start is guaranteed a matching end).
2. Avoid mixing the MyISAM and InnoDB engines inside one transaction (e.g. a transaction containing several SQL statements, some touching MyISAM tables and some InnoDB). – ghostwwl 2013-05-13
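A minimal sketch of point 2, assuming a scratch schema named test (t_innodb and t_myisam are hypothetical tables): the MyISAM row survives the rollback while the InnoDB row is undone, leaving the two tables inconsistent.
use test;
create table t_innodb (id int) engine=InnoDB;
create table t_myisam (id int) engine=MyISAM;
set autocommit = 0;
insert into t_innodb values (1);
insert into t_myisam values (1); -- MyISAM ignores the transaction and writes immediately
rollback;
select count(*) from t_innodb;  -- 0: rolled back
select count(*) from t_myisam;  -- 1: the row is still there
set autocommit = 1;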
mysql> SHOW VARIABLES LIKE '%tmp_table_size%';
+----------------+-----------+
| Variable_name  | Value     |
+----------------+-----------+
| tmp_table_size | 536870912 |
+----------------+-----------+
1 row in set (0.00 sec)
mysql> SHOW VARIABLES LIKE '%max_heap_table_size%';
+---------------------+-----------+
| Variable_name       | Value     |
+---------------------+-----------+
| max_heap_table_size | 536870912 |
+---------------------+-----------+
1 row in set (0.00 sec)
mysql>
Check:
SHOW GLOBAL STATUS LIKE '%Created_tmp_disk_tables%';
SHOW GLOBAL STATUS LIKE '%Created_tmp_tables%';
Ratio to watch:
Created_tmp_disk_tables/Created_tmp_tables < 5%
6526/2177 = 2.997703261368856
Judged to be within the normal range.
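The same ratio can be read in one query on 5.5/5.6, where the counters are exposed through information_schema.GLOBAL_STATUS (a sketch; on newer versions the table moved to performance_schema):
SELECT d.VARIABLE_VALUE / t.VARIABLE_VALUE AS disk_tmp_ratio
FROM information_schema.GLOBAL_STATUS d, information_schema.GLOBAL_STATUS t
WHERE d.VARIABLE_NAME = 'CREATED_TMP_DISK_TABLES'
AND t.VARIABLE_NAME = 'CREATED_TMP_TABLES';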
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ
Alter table ATTACHMENTDATA engine = InnoDB;
create table ch (id int,name varchar(20),cr_date datetime) engine = myisam;
insert into ch values (1,'chen',sysdate());
Alter table ch engine = InnoDB;
new_order
orderGroup
offline_order
############
Copying to tmp table: the thread is writing data to an in-memory temporary table.
Copying to tmp table on disk: the temporary result set was larger than tmp_table_size, and the thread is converting the in-memory temporary table to a disk-based one to save memory.
set global tmp_table_size=209715200; -- 200M
show variables like '%tmp%';
tmp_table_size was at the default, 16M.
show index from orders.send_jia_order;
SELECT STATE, FORMAT(DURATION, 6) AS DURATION
FROM INFORMATION_SCHEMA.PROFILING
WHERE QUERY_ID = 2 ORDER BY SEQ;
################
Sorting result
For sorting, MySQL uses two variables, sort_buffer_size and max_length_for_sort_data; increasing them can speed up ORDER BY and GROUP BY operations.
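A session-scoped bump is a safe first try; the sizes below are illustrative only, and orders.order_log is just a table that appears elsewhere in these notes:
SET SESSION sort_buffer_size = 8*1024*1024; -- 8M, this session only
SET SESSION max_length_for_sort_data = 4096;
SELECT * FROM orders.order_log ORDER BY add_time DESC LIMIT 10;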
################
show status like '%tmp%';
################
Commonly used columns:
trx_id: InnoDB's internal, unique transaction ID
trx_state: current state of the transaction
trx_started: start time of the transaction
trx_requested_lock_id: ID of the lock the transaction is waiting for
trx_wait_started: time the lock wait began
trx_weight: weight of the transaction, reflecting how many rows it has modified and locked; when a deadlock has to be resolved, the transaction with the smaller weight is rolled back
trx_mysql_thread_id: MySQL thread ID, matching the Id column of show processlist
trx_query: the SQL statement the transaction is running
use information_schema
SELECT * FROM INNODB_TRX\G
How lock waiters relate to lock holders:
SELECT * FROM INNODB_LOCK_WAITS\G
requesting_trx_id: B15
requested_lock_id: B15:0:32777:2
blocking_trx_id: B14
blocking_lock_id: B14:0:32777:2
1 row in set (0.03 sec)
The INNODB_LOCK_WAITS view shows clearly that B14 holds the lock while B15 is waiting for it.
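A join that names both sides of every wait at once (standard information_schema on 5.5/5.6):
SELECT r.trx_mysql_thread_id AS waiting_thread, r.trx_query AS waiting_query,
b.trx_mysql_thread_id AS blocking_thread, b.trx_query AS blocking_query
FROM information_schema.INNODB_LOCK_WAITS w
JOIN information_schema.INNODB_TRX r ON r.trx_id = w.requesting_trx_id
JOIN information_schema.INNODB_TRX b ON b.trx_id = w.blocking_trx_id;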
show processlist
select distinct
mysql.innodb_index_stats.table_name,
mysql.innodb_index_stats.index_name
from
mysql.innodb_index_stats
where
concat(mysql.innodb_index_stats.index_name,
mysql.innodb_index_stats.table_name) not in (select
concat(information_schema.statistics.index_name,
information_schema.statistics.table_name)
from
information_schema.statistics)
and mysql.innodb_index_stats.index_name <> 'GEN_CLUST_INDEX';
Reference:
http://www.percona.com/blog/2012/06/30/find-unused-indexes/
#############################
Changing table engines in the orders database
mysql> select version();
+------------+
| version() |
+------------+
| 5.5.32-log |
+------------+
create table not_discount_order_id_bak as select * from not_discount_order_id;
Alter TABLE not_discount_order_id RENAME TO not_discount_order_id_1;
Alter TABLE not_discount_order_id_bak RENAME TO not_discount_order_id;
create table not_discount_order_id_2 as select * from not_discount_order_id;
Alter TABLE not_discount_order_id RENAME TO not_discount_order_id_1;
Alter TABLE not_discount_order_id_bak RENAME TO not_discount_order_id;
alter table not_discount_order_id add primary key (id);
alter table not_discount_order_id auto_increment=33;
desc not_discount_order_id;
alter table not_discount_order_id MODIFY id INT(11) NOT NULL AUTO_INCREMENT;
-- note: the AUTO_INCREMENT=33 counter cannot be set inside MODIFY; it was already set separately above
add_time | timestamp | NO | | 0000-00-00 00:00:00
+-----------------------+--------+
| erp_fail_log | MyISAM |
| not_discount_order_id | MyISAM |
| send_jia_order | MyISAM |
+-----------------------+--------+
CREATE TABLE `erp_fail_log` (
  `id` int(10) NOT NULL AUTO_INCREMENT COMMENT 'auto-increment primary key',
  `sync_status` int(2) NOT NULL DEFAULT '0' COMMENT 'sync status: 0 pending, 1 submitted',
  `add_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'time of the error',
  `error_info` text COMMENT 'error info thrown by ERP',
  `group_no` varchar(25) NOT NULL COMMENT 'order group number',
  `sync_type` int(2) NOT NULL COMMENT 'sync type: 1 paid, 2 shipped, 3 refunded',
  `sync_param` text NOT NULL COMMENT 'parameters to sync',
  `success_time` timestamp NULL DEFAULT NULL COMMENT 'time of success',
  PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=113 DEFAULT CHARSET=utf8 COMMENT='ERP sync failures, retried on a schedule'
1 row in set (0.00 sec)
create table erp_fail_log_bak as select * from erp_fail_log;
alter table erp_fail_log rename to erp_fail_log_1;
alter table erp_fail_log_bak rename to erp_fail_log;
Add the auto-increment attribute:
desc erp_fail_log;
alter table erp_fail_log add primary key (id);
alter table erp_fail_log MODIFY id INT UNSIGNED AUTO_INCREMENT;
CREATE TABLE `send_jia_order` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'auto-increment primary key',
  `shop_id` int(11) DEFAULT NULL COMMENT 'shop ID',
  `provider_id` int(11) DEFAULT '0' COMMENT 'supplier ID',
  `order_id` int(11) DEFAULT NULL COMMENT 'order ID',
  `order_code` varchar(25) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL COMMENT 'order code; for now, system date + auto-increment id',
  `item_id` int(11) NOT NULL COMMENT 'item ID',
  `item_name` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'item name',
  `item_price` int(11) DEFAULT '0' COMMENT 'actual sale price of the item, after promotions are deducted',
  `total_amount` int(11) DEFAULT '0' COMMENT 'total amount: the sum of all order amounts in the group, possibly including the promotion cash-back portion (preShopOrder_orderGroup)',
  `bonus_pay` int(11) DEFAULT '0' COMMENT 'red-packet deduction amount',
  `gold_pay` int(11) DEFAULT '0' COMMENT 'merged points payment',
  `item_number` int(11) DEFAULT NULL COMMENT 'quantity bought of the same item',
  `nick_name` varchar(45) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL COMMENT 'user nickname',
  `receiver_name` varchar(255) DEFAULT NULL COMMENT 'receiver name',
  `receiver_address` varchar(255) DEFAULT NULL COMMENT 'receiver address',
  `receiver_phone` varchar(255) DEFAULT NULL COMMENT 'receiver mobile',
  `receiver_mobile` varchar(255) DEFAULT NULL COMMENT 'receiver phone',
  `receiver_zip` varchar(255) DEFAULT NULL COMMENT 'zip code',
  `receiver_memo` varchar(900) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL COMMENT 'user memo',
  `createtime` timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'order time',
  `paytime` timestamp NULL DEFAULT NULL COMMENT 'payment time',
  `ship_amount` int(11) DEFAULT '0' COMMENT 'merged shipping fee; the merchant can modify it',
  `sync_status` int(2) DEFAULT '0' COMMENT 'sync status: 0 pending, 1 pushed, 2 no push needed',
  `sync_refund` int(2) DEFAULT '0' COMMENT 'cancel-shipment push: 0 pending, 1 pushed',
  PRIMARY KEY (`id`),
  KEY `idx_order_id` (`order_id`),
  KEY `idx_shop_id` (`shop_id`)
) ENGINE=MyISAM AUTO_INCREMENT=10030588 DEFAULT CHARSET=utf8;
create table send_jia_order_bak as select * from send_jia_order;
Alter TABLE send_jia_order RENAME TO send_jia_order_1;
Alter TABLE send_jia_order_bak RENAME TO send_jia_order;
Add the auto-increment attribute:
desc send_jia_order;
show create table send_jia_order\G
alter table send_jia_order add primary key (id);
alter table send_jia_order MODIFY id INT UNSIGNED AUTO_INCREMENT;
update not_discount_order_id set id=32 where not_discount_order_id='1000216260200';
update not_discount_order_id set id=28 where not_discount_order_id='1000216260301';
update not_discount_order_id set id=29 where not_discount_order_id='1000216260402';
update not_discount_order_id set id=30 where not_discount_order_id='1000216260503';
update not_discount_order_id set id=31 where not_discount_order_id='1000216260604';
#######################
10.10.20.109
Database: orders
orderGroup
offline_order
Copy the two tables above to
Database server: 10.10.10.23
Database: shop
mysqldump -uroot -pYDtg@612 --default-character-set=utf8 orders orderGroup>orderGroup.sql
mysqldump -uroot -pYDtg@612 --default-character-set=utf8 orders offline_order>offline_order.sql
mysql -uroot -p --default-character-set=utf8 shop <orderGroup.sql
mysql -uroot -p --default-character-set=utf8 shop <offline_order.sql
mysql -uroot -p --default-character-set=utf8 shop <creditsProduce.sql
10.10.10.23 MySQL root password:
QeeKa09rOOt.
----------------------------------
Database server: 10.10.20.115
Database: point_pool
Table: creditsProduce
Copy the creditsProduce table above to
Database server: 10.10.10.23
Database: shop
mysqldump -uroot -p --default-character-set=utf8 point_pool creditsProduce>creditsProduce.sql
YDtg@612
mysql -uroot -p --default-character-set=utf8 shop <creditsProduce.sql
scp -p11658 newsinfo_1114.sql root@10.10.10.24:/usr/local/mysql/data/
mysqldump -h 10.10.10.23 -uroot -p --opt --no-data -B newsinfo phpmyadmin shop shop_umege > /usr/backup/old_newmall_20141030.sql
sz old_newmall_20141030.sql
##################
mysqldump --single-transaction --flush-logs --master-data=2 --default-character-set=utf8 > backup.sql
Say the incremental binary log produced after this is mysql-bin.000003; then restore the data like this:
mysql -uroot -p --default-character-set=utf8 shop <orderGroup.sql
mysqlbinlog mysql-bin.000003 | mysql -uroot -pPwd
Importing while ignoring errors:
mysql -uroot -p123456 -f database < filename.sql
The -f flag means force: ignore errors and keep running.
##############
C:\Program Files\Java\jdk1.7.0_51\
orders 10.10.20.109
grant all on *.* to 'adminch'@'192.168.99.%' identified by 'adminch123';
show variables like 'autocommit';
set autocommit = 0;
Values 0 and OFF mean the same thing, as do 1 and ON. Setting autocommit=0 as above turns off auto-commit (see the sanity check after the examples below).
UPDATE student SET time=NULL WHERE id=11;
update orders.order_log set add_time=NULL where add_time='0000-00-00';
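A quick sanity check that autocommit=0 really holds changes back, reusing the student table from the example above (a sketch):
set autocommit = 0;
UPDATE student SET time=NULL WHERE id=11;
rollback; -- the update is undone because it was never committed
SELECT time FROM student WHERE id=11;
set autocommit = 1;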
#############
1. View basic information about a table:
select * from information_schema.TABLES where information_schema.TABLES.TABLE_SCHEMA = 'db_name' and information_schema.TABLES.TABLE_NAME = 'table_name';
2. Check the size of a MySQL database:
SELECT (sum(DATA_LENGTH)+sum(INDEX_LENGTH))/1024/1024 as mb FROM information_schema.TABLES where TABLE_SCHEMA='point_pool';
DATA_LENGTH and INDEX_LENGTH are stored in bytes; divide by 1024 for KB and by 1048576 (=1024*1024) for MB (the query above already converts to MB).
3. Check when a table was last modified:
select TABLE_NAME,UPDATE_TIME from information_schema.TABLES where TABLE_SCHEMA='db_name' order by UPDATE_TIME desc limit 1;
select TABLE_NAME,UPDATE_TIME from information_schema.TABLES where TABLE_SCHEMA='db_name' and information_schema.TABLES.TABLE_NAME = 'table_name';
########################
Per-table backup
[root@localhost script]# more 20_103_fenbiao.sh
#!/bin/bash
for database in `mysql -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.103 -e "show databases;"|grep -Evi "database|infor|perfor|ConfigDB|test|mysql"`
do [ ! -d /var/backup2/dump/$(date +%F)/$database ] && mkdir -p /var/backup2/dump/$(date +%F)/$database
TABLE=`mysql -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.103 -e "show tables from $database;"|sed '1d'`
for table in $TABLE
do
mysqldump -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.103 $database $table |gzip >/var/backup2/dump/$(date +%F)/$database/${database}_${table}_$(date +%F).sql.gz
done
done
[root@localhost script]# more backup27071.sh
datedir="/var/backup2/mongobackup/20_153/27017_wed/$(date +%F)"
if [ ! -d $datedir ]; then
mkdir -p $datedir
fi
mongodump -h 10.10.20.153 --port 27017 -d wed -o $datedir >/var/backup2/mongobackup/20_153/27017_wed.log
if [ $? -eq 0 ];
then
echo "back successfully!"
echo "back successfully!" >>/var/backup2/mongobackup/20_153/27017_wed.log
else
echo "back failure!"
echo "back successfully!" >>/var/backup2/mongobackup/20_153/27017_wed.log
fi
[root@localhost script]# more fenku.sh
dd=$( date -u +%y%m%d)
for dbname in `mysql -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.103 -e "show databases;"|grep -Evi "database|infor|perfor|ConfigDB|test"`
do
mysqldump -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.103 --master-data=2 --default-character-set=utf8 --single-transaction $dbname|gzip >/backup/dump/20_103_dump_${dbname}_$dd.sql.gz
scp -P 11658 /backup/dump/20_103_dump_${dbname}_$dd.sql.gz 10.10.20.115:/var/backup2/dump/20_103/
done
find /backup/dump/ -mtime +10 -type f -name 20_103_dump_*.sql.gz -exec rm -f {} \;
[root@localhost script]# more xfull.sh
dd=$( date -u +%y%m%d)
innobackupex --host=10.10.20.103 --user=usvr_backup --password='B1@k&6agU#p' --defaults-file=/etc/my.cnf --slave-info --stream=tar /backup/xtrabackup/full/ 2>/backup/xtrabackup/20_103_xfull_$dd.log | gzip > /backup/xtrabackup/full/20_103_xfull_$dd.tar.gz
scp -P 11658 /backup/xtrabackup/full/20_103_xfull_$dd.tar.gz 10.10.20.115:/var/backup2/xbackup/20_103/
find /backup/xtrabackup/full/ -mtime +10 -type f -name 20_103_xfull_*.tar.gz -exec rm -f {} \;
[root@localhost script]# more backuplog.sh
for log in `cat /usr/local/mysql/logs/mysql-bin.index |cut -c 23-39`
do
if [ ! -f "/backup/logbackup/$log" ]; then
cp /usr/local/mysql/logs/$log /backup/logbackup/
fi
done
Passwordless SSH access and file transfer
########################
Slow query analysis
OS: CentOS 5.x, 64-bit
MySQL version: mysql-5.5.35
MySQL config file: /etc/my.cnf
MySQL data directory: /data/mysql
show variables like '%slow%'; # check whether the slow query log is enabled
set global slow_query_log=ON; # enable the slow query log
show variables like "long_query_time"; # check the slow query threshold; the default is 10 seconds
set global long_query_time=5; # log queries that run longer than 5 seconds
select sleep(6); # generate a test slow query
show variables like "%slow%"; # check the slow query log path
show global status like '%slow%'; # check the slow query status counters
slow-query-log = on # enable the slow query log
slow_query_log_file = /data/mysql/127-slow.log # slow query log path
long_query_time = 5 # log queries over 5 seconds; if unset, the default threshold is 10 seconds
log-queries-not-using-indexes = on # also log queries that use no index
mysql 5.1:
show variables like '%slow%';
+---------------------+--------------------------------+
| Variable_name | Value |
+---------------------+--------------------------------+
| log_slow_queries | ON |
| slow_launch_time | 2 |
| slow_query_log | ON |
| slow_query_log_file | /usr/local/mysql/logs/slow.log |
+---------------------+--------------------------------+
II. Installing and using mysqlsla, a MySQL slow-log analyzer
1. Install mysqlsla
yum install wget perl perl-DBI perl-DBD-MySQL mysql # install the dependencies
cd /usr/local/src # go to the source directory
wget http://hackmysql.com/scripts/mysqlsla-2.03.tar.gz # download mysqlsla
tar zxvf mysqlsla-2.03.tar.gz # unpack
cd mysqlsla-2.03 # enter the build directory
perl Makefile.PL # configure
make # build
make install # install
2. Analyze the MySQL slow log with mysqlsla
# top 20 statements by total execution time, written to select.log
mysqlsla -lt slow --sort t_sum --top 20 /data/mysql/127-slow.log >/tmp/select.log
# all SELECT slow queries in /data/mysql/127-slow.log, showing the 100 longest-running, written to sql_select.log
mysqlsla -lt slow -sf "+select" -top 100 /data/mysql/127-slow.log >/tmp/sql_select.log
# all SELECT and UPDATE slow queries against database mydata in /data/mysql/127-slow.log, top 100 by query count, written to sql_num.log
mysqlsla -lt slow -sf "+select,update" -top 100 -sort c_sum -db mydata /data/mysql/127-slow.log >/tmp/sql_num.log
################# percona-toolkit slow query monitoring #################
http://blog.itpub.net/26355921/viewspace-1165495/
http://blog.itpub.net/26355921/viewspace-774506/
http://www.percona.com/doc/percona-toolkit/2.2/pt-query-digest.html
wget percona.com/get/percona-toolkit.rpm
First install the percona-toolkit-2.2.7-1.noarch.rpm package; it may fail with:
perl(DBD::mysql) >= 1.0 is needed by percona-toolkit-2.2.7-1.noarch.rpm
perl(IO::Socket::SSL) is needed by percona-toolkit-2.2.7-1.noarch.rpm
yum list|grep DBD
yum list|grep Socket|grep SSL
yum -y install perl-DBD-MySQL.x86_64
yum -y install perl-IO-Socket-SSL.noarch
rpm -ivh percona-toolkit-2.2.7-1.noarch.rpm
unzip Query-Digest-UI-master.zip
mv Query-Digest-UI-master /var/www/html/slow
yum install Apache mod_php php_pdo
yum install php-mysql php-dba php-gd php-pdo php-bcmatch php-cli
[root@/var/www/html/slow]mysql -uroot -p123456 < install.sql
[root@/var/www/html/slow]mysql -uroot -p123456
mysql > grant all on slow_query_log.* to 'slow'@'%' identified by 'test';
mysql > grant select on *.* to 'slow'@'%';
mysql > grant all on slow_query_log.* to 'slow'@'localhost';
mysql > grant select on *.* to 'slow'@'localhost';
flush privileges;
ERROR 1045 (28000): Access denied for user 'root'@'%' (using password: YES)
select * from mysql.user where user='root' and host='%'\G
The permission error is because root logging in via '%' lacks the GRANT OPTION privilege.
mysql> update mysql.user set Grant_priv='Y' where user='root' and host='%';
flush privileges;
5. Analyze the slow log with pt-query-digest and store the results in a database on host db.example.com:
Statement for pt versions below 2.2:
pt-query-digest --user=slow --password=test \
--review h=db.example.com,D=slow_query_log,t=global_query_review \
--review-history h=db.example.com,D=slow_query_log,t=global_query_review_history \
--no-report --limit=0% \
--filter=" \$event->{Bytes} = length(\$event->{arg}) and \$event->{hostname}=\"$HOSTNAME\"" \
/root/test/localhost-slow.log
(1) Analyze a slow log file directly:
pt-query-digest /root/slow.log > /root/slow_report.log
(2) Analyze queries from the last 12 hours:
pt-query-digest --since=12h slow.log > slow_report2.log
(3) Analyze queries within a given time range:
pt-query-digest slow.log --since '2014-04-17 09:30:00' --until '2014-04-17 10:00:00' > slow_report3.log
(4) Analyze only slow queries containing SELECT statements:
pt-query-digest --filter '$event->{fingerprint} =~ m/^select/i' slow.log > slow_report4.log
(5) Slow queries from a specific user:
pt-query-digest --filter '($event->{user} || "") =~ m/^root/i' slow.log > slow_report5.log
(6) All slow queries doing a full table scan or a full join:
pt-query-digest --filter '(($event->{Full_scan} || "") eq "yes") || (($event->{Full_join} || "") eq "yes")' slow.log > slow_report6.log
(7) Save queries into the query_review table:
pt-query-digest --user=root --password=abc123 --review h=localhost,D=test,t=query_review --create-review-table slow.log
(8) Save queries into the query_history table:
pt-query-digest --user=root --password=abc123 --review h=localhost,D=test,t=query_history --create-review-table slow.log_20140401
pt-query-digest --user=root --password=abc123 --review h=localhost,D=test,t=query_history --create-review-table slow.log_20140402
(9) Capture MySQL TCP traffic with tcpdump, then analyze it:
tcpdump -s 65535 -x -nn -q -tttt -i any -c 1000 port 3306 > mysql.tcp.txt
pt-query-digest --type tcpdump mysql.tcp.txt > slow_report9.log
(10) Analyze a binlog:
mysqlbinlog mysql-bin.000093 > mysql-bin000093.sql
pt-query-digest --type=binlog mysql-bin000093.sql > slow_report10.log
(11) Analyze a general log:
pt-query-digest --type=genlog localhost.log > slow_report11.log
#############
mysqldump -uroot -pYDtg@612 --host=10.232.74.192 -P3306 --all-databases --flush-privileges > /data/backup/backup_mmzbprod_0327.sql
mysqldump -uroot -pYDtg@612 -A --default-character-set=utf8 --flush-privileges > /home/mysql/mysqlfull.sql
mysqldump -uroot -pYDtg@612 --default-character-set=utf8 weixin > /home/mysql/weixin.sql
mysqldump -uroot -pYDtg@612 --default-character-set=utf8 wedding > /home/mysql/wedding.sql
3. Parameters for automatic log purging
The expire_logs_days parameter in my.cnf defaults to 0, meaning "never delete automatically".
Purging may happen at server startup and when the binary log rotates.
Syntax: log in with mysql -u -p and run:
set GLOBAL expire_logs_days=5;
The setting does not take effect immediately, though;
you also need to:
1. run flush logs;
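To purge old binlogs immediately instead of waiting for a rotation, the standard PURGE BINARY LOGS syntax also works (the file name below is illustrative):
PURGE BINARY LOGS BEFORE DATE_SUB(NOW(), INTERVAL 5 DAY);
PURGE BINARY LOGS TO 'mysql-bin.000010';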
Build and install from source:
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local/mysql_weixin \
-DMYSQL_DATADIR=/usr/local/mysql_weixin/data \
-DSYSCONFDIR=/usr/local/mysql_weixin \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_MEMORY_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DMYSQL_UNIX_ADDR=/usr/local/mysql_weixin/mysql.sock \
-DMYSQL_TCP_PORT=3307 \
-DENABLED_LOCAL_INFILE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DEXTRA_CHARSETS=all \
-DDEFAULT_CHARSET=utf8 \
-DDEFAULT_COLLATION=utf8_general_ci \
-DMYSQL_DYNAMIC_PLUGIN=1 \
-DENABLE_DOWNLOADS=0
[client]
port=3307
user=mysql
socket=/usr/local/mysql_weixin/mysql.sock
character-set-server=utf8
[mysqld]
server-id=1
port=3307
user=mysql
socket=/usr/local/mysql_weixin/mysql.sock
basedir=/usr/local/mysql_weixin
datadir=/usr/local/mysql_weixin/data
skip-external-locking
skip-name-resolve
default-storage-engine=innodb
interactive_timeout=100
wait_timeout=100
connect_timeout=20
back_log=500
myisam_recover
event_scheduler=on
character-set-server=utf8
weixinjia
weixinjia20150429
Account: wenshuai_r
Password: sent to you over IMO; if you were offline and missed it, come find me directly
Connection IP and port (jump host): 10.10.10.39:20109
118:
select user,host,password from mysql.user;
grant select,usage on NEWMALL.* to 'zhouhaijun_r'@'10.10.10.39' identified by 'zhouhaijun!@#';
grant select,usage on NEWMALL_LOG.* to 'zhouhaijun_r'@'10.10.10.39';
grant select,usage on uc.* to 'zhouhaijun_r'@'10.10.10.39';
grant select,usage on point_pool.* to 'zhouhaijun_r'@'10.10.10.39';
grant select,usage on point_product.* to 'zhouhaijun_r'@'10.10.10.39';
grant select,usage on zhifu.* to 'zhouhaijun_r'@'10.10.10.39';
grant select,usage on NEWMALL.* to 'liwentao_r'@'10.10.10.39' identified by 'liwentao!@#';
grant select,usage on NEWMALL.* to 'tongshan_r'@'10.10.10.39' identified by 'tongshan_r@#';
grant select,usage on NEWMALL_LOG.* to 'tongshan_r'@'10.10.10.39';
grant select,usage on point_pool.* to 'tongshan_r'@'10.10.10.39';
grant select,usage on point_product.* to 'tongshan_r'@'10.10.10.39';
grant select,usage on uc.* to 'tongshan_r'@'10.10.10.39';
grant select,usage on zhifu.* to 'tongshan_r'@'10.10.10.39';
mysql -utongshan_r -p'tongshan_r@#' -h10.10.10.39 -P 20118
flush privileges;
mysql -uzhouhaijun_r -p'zhouhaijun!@#' -h10.10.10.39 -P 20118
mysql -uzhouhaijun_r -p'zhouhaijun!@#' -h10.10.10.39 -P 20118
109:
grant select,usage on orders.* to 'zhouhaijun_r'@'10.10.10.39' identified by 'zhouhaijun!@#';
grant select,usage on orders.* to 'wenshuai_r'@'10.10.10.39' identified by 'wenshuai!@#';
flush privileges;
grant select,usage on orders.* to 'jiangmingwu_r'@'10.10.10.39' identified by 'jiangmingwu!@#';
flush privileges;
grant select,usage on orders.* to 'liwentao_r'@'10.10.10.39' identified by 'liwentao!@#';
flush privileges;
grant select,usage on orders.* to 'humaohua_r'@'10.10.10.39' identified by 'humaohua!@#';
flush privileges;
grant select,usage on orders.* to 'uws_search'@'10.10.10.39' identified by 'Se$@9akrMs@123';
flush privileges;
grant all on *.* to 'chenh'@'10.10.10.39' identified by 'chenh!@#';
flush privileges;
mysql -uwenshuai_r -p'wenshuai!@#' -h10.10.10.39 -P 20109
mysql -uwenshuai_r -p'wenshuai!@#' -h10.10.10.39 -P 33109
138:
grant select,usage on *.* to 'mazhuguo_r'@'10.10.10.39' identified by 'mazhuguo!@#';
flush privileges;
mysql -umazhuguo_r -p'mazhuguo!@#' -h10.10.10.39 -P 20138
20138
115:33118
mysql -uroot -p -P 33118 -S /var/databases/newmall/mysql.sock
grant select,usage on NEWMALL.* to 'un_zxyong_r'@'10.10.10.39' identified by 'un_zxyong!@#';
grant select,usage on point_pool.* to 'un_zxyong_r'@'10.10.10.39';
grant select,usage on point_product.* to 'un_zxyong_r'@'10.10.10.39' ;
grant select,usage on uc.* to 'un_zxyong_r'@'10.10.10.39';
flush privileges;
mysql -uun_zxyong_r -p'un_zxyong!@#' -h10.10.10.39 -P 33118
144:
grant select,usage on *.* to 'zhuhai_r'@'10.10.10.39' identified by 'zhuhai!@#';
flush privileges;
grant select,usage on aabb.* to 'luyaochun_r'@'10.10.10.39' identified by 'luyaochun!@#';
flush privileges;
grant select,usage on *.* to 'huangjianfeng_r'@'10.10.10.39' identified by 'huangjianfeng!@#';
flush privileges;
grant select,usage on *.* to 'jiangwei_r'@'10.10.10.39' identified by 'jiangwei!@#';
flush privileges;
mysql -uzhuhai_r -p'zhuhai!@#' -h10.10.10.39 -P 20144
mysql -uluyaochun_r -p'luyaochun!@#' -h10.10.10.39 -P 20144
115:33138
grant select,usage on wallet.* to 'un_liunanjing_r'@'10.10.10.39' identified by 'un_liunanjing!@#';
grant select,usage on wallet_log.* to 'un_liunanjing_r'@'10.10.10.39';
flush privileges;
grant select,usage on *.* to 'wangshengzhang_r'@'10.10.10.39' identified by 'wangshengzhang!@#';
flush privileges;
grant select,usage on *.* to 'zhuxiaofeng_r'@'10.10.10.39' identified by 'zhuxiaofeng!@#';
flush privileges;
mysql -uwangshengzhang_r -p'wangshengzhang!@#' -h10.10.10.39 -P 33138
115:33118
grant select,usage on NEWMALL.* to 'tandanfeng_r'@'10.10.10.39' identified by 'tandanfeng!@#';
grant select,usage on point_pool.* to 'tandanfeng_r'@'10.10.10.39' ;
grant select,usage on point_product.* to 'tandanfeng_r'@'10.10.10.39' ;
grant select,usage on uc.* to 'tandanfeng_r'@'10.10.10.39' ;
flush privileges;
115:33109
grant select,usage on appadmin.* to 'wangpengfei_r'@'10.10.10.39' identified by 'wangpengfei_r!@#';
flush privileges;
grant select,usage on order.* to 'wuxiaodong_r'@'10.10.10.39' identified by 'wuxiaodong_r!@#';
flush privileges;
grant select,usage on orders.* to 'tongshan_r'@'10.10.10.39' identified by 'tongshan!@#';
flush privileges;
NEWMALL
point_pool
point_product
uc
155:
grant select,usage on NEWMALL.* to 'selectorder'@'10.10.10.39' identified by 'selectorder@#1';
flush privileges;
23307
delete from mysql.user where user='reader';
flush privileges;
select user,host,password from mysql.user where user='reader';
115:33127
grant select,usage on mobile_crm.* to 'jiaxuetao_r'@'10.10.10.39' identified by 'jiaxuetao_r@#1';
flush privileges;
100.24:
王晓炜 (Wang Xiaowei)
grant all on *.* to 'wangxiaowei_rw'@'%' identified by 'wangxiaowei!@#';
flush privileges;
Running: grant select,usage on appadmin.* to 'wangpengfei_r'@'10.10.10.39' identified by 'wangpengfei_r!@#'
Error code: 1044
Access denied for user 'chenh'@'10.10.10.39' to database 'appadmin'
-------------------------
The cause turned out to be the log tables in the mysql system database, which cannot be locked (lock tables). --lock-tables=0: back up without locking tables.
mysqldump -uroot -p --lock-tables=0 mysql >/root/mysqlprv.sql
mysqldump -uroot -pYDtg@612 mysql >/root/mysqlprv2.sql
mysql -uadminch -p'*B605AE15C73F51DA63BFE3111E968C0A410E4F4D'
--skip-grant-tables
mysqldump: Got error: 1556: You can't use locks with log tables. when using LOCK TABLES
[1 Jul 2008 18:14] Sveta Smirnova
Thank you for the report.
Verified as described. Probably duplicate of bug #34306.
Test case:
select 1;
--exec $MYSQL_DUMP mysql general_log
Options:
--log-output=TABLE --log
egrep -v "#|\*|--|^$" mysqlprv.sql.2
mysqldump -uroot -pYDtg@612 -S /var/databases/mobile/mysql.sock mysql user >mysql_user.sql
mysql -uroot -pYDtg@612
use mysql;
INSERT INTO `user` VALUES ('localhost','root','*2C2C461A198E81E755233A41340908984B984877','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('127.0.0.1','root','*2C2C461A198E81E755233A41340908984B984877','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('localhost','tgwap','*5B70591C04EB0611C2A79954EFBDDC5FBBB815CD','Y','Y','Y','Y','N','N','Y','N','Y','Y','N','Y','Y','N','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('10.10.20.139','tgwap','*5B70591C04EB0611C2A79954EFBDDC5FBBB815CD','Y','Y','Y','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('192.168.%.%','tgwap','*5B70591C04EB0611C2A79954EFBDDC5FBBB815CD','Y','Y','Y','Y','N','N','Y','N','Y','Y','N','Y','Y','N','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('10.10.%.%','tgwap','*5B70591C04EB0611C2A79954EFBDDC5FBBB815CD','Y','Y','Y','Y','N','N','Y','N','Y','Y','N','Y','Y','N','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('10.10.10.120','biselect','*BF652E1DA7BD6A90A1B5E387D768841F3E35CEDA','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.10.120','mobileselect','*054E07DCB185488266FBBDBC46D8712966B92BA8','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','_del_mobile_digi','*FFFBC4FB84878A38B3BF12B3E4E24E751C9D0555','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('localhost','script','*9F711306729703C16347B99B93CC8035190CCFAE','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.110','cactiuser','*43DD7940383044FBDE5B177730FAD3405BC6DAD7','N','N','N','N','N','N','N','N','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','script','*9F711306729703C16347B99B93CC8035190CCFAE','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','uapp_app','*13DA357B34F3E99A595424AB439D674AE5C04C73','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','_dmobile_digital','','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','uws_waptuku','*94118202C42DEA9B18D1705C567D3E9621079E4E','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('localhost','monitor','*B25DC303840BB35DE25FF34226D7CCB3482BA77E','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('192.168.%','zhuangxiu','*8B88C25DFBE770095FF440727D92232D18C6E0A5','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.%','zhuangxiu','*8B88C25DFBE770095FF440727D92232D18C6E0A5','Y','Y','Y','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N',
'N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.%','usvr_ssis','*DB65EA2322C029E1A3C75749A469A95106937140','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','Y','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','_del_zhuangxiu','*8B88C25DFBE770095FF440727D92232D18C6E0A5','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','_del_liweishi','*09FAB5CDD4005671FF5E9B68052B46EF400980D8','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('192.168.99.157','_del_liweishi','*09FAB5CDD4005671FF5E9B68052B46EF400980D8','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','usvr_replicate','*7FE421F5A9A6FECB19DBFF4C52BA35700D215671','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','Y','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('192.168.99.%','us_zhjing','*8338C6A5F3E032441C77D7EFBD65BA6EE5EA1F49','Y','Y','Y','Y','N','N','Y','N','Y','Y','Y','Y','Y','N','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('%','usvr_backup','*E9382B7C030E8F605A18DCC211923C0258F91AB7','Y','N','N','N','N','N','Y','N','N','Y','N','N','N','N','Y','N','N','Y','N','N','Y','N','N','N','N','N','N','N','','','','',0,0,0,0),('localhost','admin','*0108F76A1B387B83CEACDD74221B18BF86FFEF9A','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','root','*3D8D82A53A71F6E3BD29F3234EC0844505BD1122','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('localhost','usvr_dbmonitor','*55770B9D02C239533CA1F1B55D486050E76F75DE','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','Y','N','N','N','N','N','N','N','','','','',0,0,0,0),('10.10.20.%','usvr_checksums','*39BF3FC1890A52A2B9090C9B2A3383C045A06172','Y','Y','Y','Y','N','N','N','N','Y','N','N','N','N','N','N','N','N','Y','N','Y','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','un_wu','*2E41B0CC310C139E937DF85FC3833581D5058AA3','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0),('192.168.9%','rm_dev_dev','*5FA77BD5F1AB706CB152522C1706E05E29AEEFCA','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','rm_dev_dev','','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','usr_package','*2EAF1D742A5F9B03F52C81539AB448F118B1024E','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','usr_chengzhang','*5600BAEC9AAAF5245D7707B1B88B858CC35D43E6','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','%','','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0,0),('%','usr_xm_jerry','*7DE14644018E5ADE87E33D76083EDA21134752A5','Y','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','
N','','','','',0,0,0,0);
commit;
InnoDB: Warning: purge reached the head of the history list
unit from the internal data dictionary of InnoDB though the .frm file for the table exists
grant all on *.* to 'admin'@'%' identified by '123456';
Error:
ERROR 1045 (28000): Access denied for user 'root'@'%' (using password: YES)
Fix:
select * from mysql.user where user='root' and host='%'\G
update mysql.user set Grant_priv='Y' where user='root' and host='%';
flush privileges;
----------------------
Adding another instance:
mkdir -p /data/cms
chown -R mysql:mysql /data/cms
./mysql_install_db --user=mysql --basedir=/usr/local/mysql/ --datadir=/data/cms
[root@db2 cms]# more /data/cms/my.cnf
[client]
password = 123456
port = 3307
socket = /data/cms/mysql.sock
default-character-set=utf8
pid-file = /data/cms/mysql_3307.pid
[mysqld]
port = 3307
socket = /data/cms/mysql.sock
skip-external-locking
pid-file = /data/cms/mysql_3307.pid
server-id = 45
basedir=/usr/local/mysql/
datadir=/data/cms
#binlog_format=STATEMENT
binlog_format=row
log-bin=/data/cms/archive/mysql-bin
default_storage_engine=InnoDB
open_files_limit = 65535
sync_binlog = 1
max_allowed_packet = 52m
default-storage-engine = InnoDB
innodb_flush_log_at_trx_commit = 2
innodb_flush_method=O_DIRECT
启动:
./mysqld_safe --defaults-file=/data/cms/my.cnf &
mysql -uroot -p123456 -h127.0.0.1 -P3307
Note: multiple instances are told apart by port, pid file and socket. (On localhost the client defaults to the socket and ignores -P, hence the -h127.0.0.1 above.)
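A quick per-instance liveness check goes through each instance's own socket (a sketch):
mysqladmin -uroot -p123456 -S /data/cms/mysql.sock status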
-----------------------
Company backup/restore test: innobackupex
MySQL version: Server version: 5.1.58 MySQL Community Server (GPL)
Restore:
Restoring an incremental backup is far more complex than restoring a full one. Step one is to replay the committed transactions in every backup directory, e.g.:
innobackupex --apply-log --redo-only BASE-DIR
innobackupex --apply-log --redo-only BASE-DIR --incremental-dir=INCREMENTAL-DIR-1
innobackupex --apply-log BASE-DIR --incremental-dir=INCREMENTAL-DIR-2
Here BASE-DIR is the full-backup directory, INCREMENTAL-DIR-1 the first incremental, INCREMENTAL-DIR-2 the second, and so on.
Note that the final incremental is applied WITHOUT the --redo-only option! Also, --use-memory can be used to speed this up (see the sketch below).
Once the statements above succeed, the final data sits in BASE-DIR (the full-backup directory).
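Regarding --use-memory, a sketch of how it attaches to the prepare phase (the 1G value is arbitrary):
innobackupex --apply-log --redo-only --use-memory=1G BASE-DIR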
With step one done, step two rolls back the uncommitted transactions:
innobackupex --apply-log BASE-DIR
After that, the backup files in BASE-DIR are fully prepared; the last step is to copy them back:
innobackupex --copy-back BASE-DIR
As always, check that the permissions on the data directory are correct after the copy.
Incremental backup:
An incremental backup is based on a full backup. Assume we already have one (/backup/mysql/data/2013-10-29_09-05-25) and take an incremental on top of it:
innobackupex --defaults-file=/opt/mysql/my.cnf --user=root --password=*** --incremental-basedir=/backup/mysql/data/2013-10-29_09-05-25 --incremental /backup/mysql/data
--incremental-basedir points at the full-backup directory; --incremental points at where the incremental should go.
Can we take an incremental on top of an incremental? Yes: just point --incremental-basedir at the previous incremental's directory, like this:
innobackupex --defaults-file=/opt/mysql/my.cnf --user=root --password=*** --incremental-basedir=/backup/mysql/data/2013-10-29_09-52-37 --incremental /backup/mysql/data
full backup:
$ innobackupex /data/backups
incr1 backup:
innobackupex --incremental /data/backups --incremental-basedir=BASEDIR
incr2 backup:
innobackupex --incremental /data/backups --incremental-basedir=INCREMENTAL-DIR-1
restore :
innobackupex --apply-log --redo-only BASE-DIR
innobackupex --apply-log --redo-only BASE-DIR --incremental-dir=INCREMENTAL-DIR-1
innobackupex --apply-log BASE-DIR --incremental-dir=INCREMENTAL-DIR-2
Full backup:
innobackupex --host=$ip --user=usvr_backup --password='B1@k&6agU#p' --defaults-file=$cnfpath/etc/my.cnf --slave-info --databases=$dbn SqlFullBackup/ >/tmp/Full.txt
Incremental:
innobackupex --host=$ip --user=usvr_backup --password='B1@k&6agU#p' --defaults-file=$cnfpath/etc/my.cnf --slave-info --databases=$dbn --incremental --incremental-basedir="SqlFullBackup/"$fullbakfile"/" SqlLogBackup_Last/ >/tmp/Log.txt
Incremental:
innobackupex --host=$ip --user=usvr_backup --password='B1@k&6agU#p' --defaults-file=$cnfpath/etc/my.cnf --databases=$dbn --slave-info --incremental --incremental-basedir="SqlLogBackup_Last/"$Logfile"/" SqlLogBackup_Last/ >/tmp/Log.txt
drwxr-xr-x 3 root root 4096 May 11 08:24 SqlFullBackup
drwxr-xr-x 3 root root 4096 May 11 09:39 SqlLogBackup
drwxr-xr-x 3 root root 4096 May 11 09:39 SqlLogBackup_Last
Restore:
1. Prepare the full backup
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27
2. First round of incremental restores
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup/2015-05-10_08-28-40
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup/2015-05-10_09-36-20
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup/2015-05-10_11-32-58
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup/2015-05-10_14-28-55
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup/2015-05-10_17-13-18
3. Final incremental restore
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --apply-log --redo-only /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27 --incremental-dir=/data/2015-05-10/SqlLogBackup_Last/2015-05-10_20-13-17
(Note: per the procedure above, this final incremental should be applied without --redo-only, followed by one last innobackupex --apply-log on the full-backup directory before the copy-back.)
4. Copy the database back:
innobackupex --host=127.0.0.1 --user=root --password='123456' --port=3307 --defaults-file=/data/cms/my.cnf --copy-back /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27
chown -R mysql:mysql /data/cms
/usr/local/mysql51/bin/mysqld_safe --defaults-file=/data/cms/my.cnf &
Start:
/usr/local/mysql51/bin/mysqld --defaults-file=/data/cms/my.cnf --user=mysql &
./mysqladmin -uroot password '123456' -S /data/cms/mysql.sock
mysql -uroot -pQeeKa09rOOt. -P3307 -h127.0.0.1
Configure a slave from this backup
more xtrabackup_slave_info
CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.000598', MASTER_LOG_POS=979723631
GRANT REPLICATION SLAVE ON *.* TO 'slave'@'%' IDENTIFIED BY '123456';
flush privileges;
CHANGE MASTER TO
MASTER_HOST='10.10.20.149',
MASTER_USER='slave',
MASTER_PASSWORD='123456',
MASTER_PORT=3306,
MASTER_LOG_FILE='mysql-bin.000598',
MASTER_LOG_POS=979723631,
MASTER_CONNECT_RETRY=10;
show slave status\G
Seconds_Behind_Master: 0 // zero seconds: the slave has no replication lag
------------------
Upgrading a slave from 5.1.58-log to 5.6.19: OK
tar zxvf mysql-5.6.19-linux-glibc2.5-x86_64.tar.gz
mv mysql-5.6.19-linux-glibc2.5-x86_64 /usr/local/mysql56
cd /usr/local/mysql56
chown -R mysql:mysql /usr/local/mysql56/
Start with the new 5.6 binary:
/usr/local/mysql56/bin/mysqld --defaults-file=/data/cms/my.cnf --user=mysql --lc_messages_dir=/usr/local/mysql56/share/english &
Upgrade:
/usr/local/mysql56/bin/mysql_upgrade -uroot -p'QeeKa09rOOt.' -P3307 -h127.0.0.1
/usr/local/mysql56/bin/mysql -uroot -p'QeeKa09rOOt.' -P3307 -h127.0.0.1
[root@db2 cms]# more my.cnf
[client]
#password = 123456
port = 3307
socket = /data/cms/mysql.sock
default-character-set=utf8
pid-file = /data/cms/mysql_3307.pid
[mysqld]
port = 3307
socket = /data/cms/mysql.sock
skip-external-locking
pid-file = /data/cms/mysql_3307.pid
server-id = 45
basedir=/usr/local/mysql56/
datadir=/data/cms
open_files_limit = 65535
sync_binlog = 1
max_allowed_packet = 52m
replicate-ignore-db = ConfigDB
master: 5.1.58-log ---- slave: 5.6.19, replication working normally
MySQL 5.6 delayed replication on a slave
Log in to the slave database server:
mysql>stop slave;
mysql>CHANGE MASTER TO MASTER_DELAY = 600;
mysql>start slave;
mysql>show slave status \G
A 600-second delay.
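To confirm the delay took effect, SHOW SLAVE STATUS on 5.6 reports it directly; expect fields like:
SQL_Delay: 600
SQL_Remaining_Delay: NULL -- counts down while the slave is holding an event back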
Master-slave replication:
Only two parameters are covered here; everything else is the slave's connection information for the master and the relay-log settings.
master-connect-retry=30 # controls the retry interval; the default is 60 seconds.
slave-skip-errors=1007,1008,1053,1062,1213,1158,1159 # errors ignored during replication; they do not affect data integrity and come up often, so they are usually skipped. 1062 is the duplicate primary key error.
SHOW BINLOG EVENTS in 'mysql-bin.000017' from 1919\G
Basic switchover idea: to let the slave catch up with the backup server and stop at exactly the right position, use the start slave until command.
slave>start slave until master_log_file='master-bin-000006',master_log_pos=700;
slave>select master_pos_wait('master-bin-000006',700);
----- Replication failure: recovering a single table
insert into cms.cms_admin_log values (null,'192.168.99.128',null,null,null,null,null,null,null);
select * from cms.cms_admin_log where ip_addr='192.168.99.128';
| 5415791 | 192.168.99.128 | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
Replicate_Wild_Ignore_Table:
Last_Errno: 1062
Last_Error: Error 'Duplicate entry '5415791' for key 'PRIMARY'' on query. Default database: 'cms'. Query: 'INSERT INTO `cms_admin_log` (`c`, `m`, `query_string`, `request_uri`, `ip_addr`, `user_id`, `user_name`, `access_time`) VALUES ('pagelist', 0, 'c=pagelist', '/content/index.php?c=pagelist', '180.175.0.59', '12808685', 'zhangchen1', 1431425060)'
mysqldump -uroot -p -P3307 -h127.0.0.1 --single-transaction --master-data cms cms_admin_log > cms_admin_log.sql
--------
----
LSNs of each backup are below.
more /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27/xtrabackup_checkpoints
backup_type = full-backuped
from_lsn = 0
to_lsn = 371853468650
last_lsn = 371853468650
After all incrementals are applied:
backup_type = full-prepared
from_lsn = 0
to_lsn = 372353540363
last_lsn = 372353540363
compact = 0
more /data/2015-05-10/SqlLogBackup/2015-05-10_08-28-40/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 371853468650
to_lsn = 371855547493
last_lsn = 371855547493
more /data/2015-05-10/SqlLogBackup/2015-05-10_09-36-20/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 371855547493
to_lsn = 371902423906
last_lsn = 371902423906
more /data/2015-05-10/SqlLogBackup/2015-05-10_11-32-58/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 371902423906
to_lsn = 371989521666
last_lsn = 371989521666
more /data/2015-05-10/SqlLogBackup/2015-05-10_14-28-55/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 371989521666
to_lsn = 372108248894
last_lsn = 372108248894
more /data/2015-05-10/SqlLogBackup/2015-05-10_17-13-18/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 372108248894
to_lsn = 372219720531
last_lsn = 372219720531
more /data/2015-05-10/SqlLogBackup_Last/2015-05-10_20-13-17/xtrabackup_checkpoints
backup_type = incremental
from_lsn = 372219720531
to_lsn = 372353540363
last_lsn = 372353540363
Full:
ll /data/2015-05-10/SqlFullBackup/2015-05-10_08-24-27
Incremental:
[root@db2 SqlLogBackup]# ll /data/2015-05-10/SqlLogBackup
total 20
drwxr-xr-x. 6 root root 4096 May 10 08:31 2015-05-10_08-28-40
drwxr-xr-x. 6 root root 4096 May 10 09:39 2015-05-10_09-36-20
drwxr-xr-x. 6 root root 4096 May 10 11:38 2015-05-10_11-32-58
drwxr-xr-x. 6 root root 4096 May 10 14:32 2015-05-10_14-28-55
drwxr-xr-x. 6 root root 4096 May 10 17:15 2015-05-10_17-13-18
Incremental:
ll /data/2015-05-10/SqlLogBackup_Last/2015-05-10_20-13-17
Add foreign keys:
ALTER TABLE action_list ADD CONSTRAINT fk_users_id FOREIGN KEY(user_id) REFERENCES users(id);
ALTER TABLE action_list ADD CONSTRAINT fk_server_id FOREIGN KEY(server_id) REFERENCES servers(id);
Linux one-liner for killing processes in bulk:
ps aux | grep procname | awk '{print $2}' | xargs kill -9
Fixes for MySQL stuck in "copy to tmp table":
Solutions:
1. Use show full processlist to see the complete SQL
2. EXPLAIN the SQL to find what makes it slow
3. Add the indexes it needs
4. Increase tmp_table_size in my.cnf
5. Point the tmpdir directory in my.cnf at a tmpfs to speed up IO
6. mount tmpfs /mysql_tmp -t tmpfs
To use tmpfs, enable "Virtual memory file system support" in the kernel config.
To keep tmpfs from consuming all your VM, you sometimes want to cap its size. To create a tmpfs file system of at most 32 MB, type:
# mount tmpfs /dev/shm -t tmpfs -o size=32m
Added to /etc/fstab it looks like:
tmpfs /dev/shm tmpfs size=32m 0 0
Limiting swap usage:
shell> sysctl -w vm.swappiness=0
Apply it immediately:
/sbin/sysctl -p
/usr/local/mysql/bin/mysqld_safe --skip-grant-tables &
Remote MySQL connections failing because of bind-address
netstat -apn|grep 3360
Edit the MySQL config file /etc/mysql/my.conf: add the remote client's IP after bind-address, or comment that line out, and remote logins will work.
Remember to restart the MySQL service.
Connecting to MySQL on another machine on the LAN was very slow, with a delay of several seconds and no obvious cause.
A fix found online: add to my.ini
[mysqld]
skip-name-resolve
skip-grant-tables
That restores the speed!
skip-name-resolve
disables DNS resolution, so connections are much faster; the catch is that the MySQL grant tables can then only use IPs, not hostnames.
With --skip-grant-tables the server performs no access control at all for any user, though access control can be switched back on with mysqladmin flush-privileges or mysqladmin reload; by default the show databases statement is then open to every user.
slave stop;
SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1
replicate-ignore-db = ConfigDB
Row counts and sizes of the tables in one database:
SELECT CONCAT(table_schema,'.',table_name) AS 'Table Name', CONCAT(ROUND(table_rows/1000000,4),'M') AS 'Number of Rows', CONCAT(ROUND(data_length/(1024*1024*1024),4),'G') AS 'Data Size', CONCAT(ROUND(index_length/(1024*1024*1024),4),'G') AS 'Index Size', CONCAT(ROUND((data_length+index_length)/(1024*1024*1024),4),'G') AS 'Total' FROM information_schema.TABLES WHERE table_schema LIKE 'cmsreal' order by table_rows desc;
select max(record_id) from cmsreal.cms_form_record;
SHOW GLOBAL VARIABLES;
grep -c processor /proc/cpuinfo
free -m | grep Mem | awk '{ print $2 }'
uname -m
MySQL performance:
http://www.askapache.com/mysql/performance-tuning-mysql.html
http://www.aodba.com/en/87-2/
------------------
[root@db2 tnslave]# more my.cnf
[client]
port = 3309
socket = /data/tnslave/mysql.sock
default-character-set=utf8
pid-file = /data/tnslave/mysql_3309.pid
[mysqld]
port = 3309
socket = /data/tnslave/mysql.sock
skip-external-locking
pid-file = /data/tnslave/mysql_3309.pid
server-id = 46
basedir=/usr/local/mysql56/
datadir=/data/tnslave
binlog_format=mixed
log-bin=/data/tnslave/mysql-bin
open_files_limit = 65535
sync_binlog = 1
max_allowed_packet = 52m
replicate-ignore-db = ConfigDB
mkdir tnslave
chown -R mysql:mysql tnslave
/usr/local/mysql56/scripts/mysql_install_db --user=mysql --basedir=/usr/local/mysql56/ --datadir=/data/tnslave/
/usr/local/mysql56/bin/mysqld --defaults-file=/data/tn/my.cnf --user=mysql &
/usr/local/mysql56/bin/mysqld --defaults-file=/data/tnslave/my.cnf --user=mysql &
mysql -uroot -S /data/tnslave/mysql.sock
mysql -uroot -S /data/tn/mysql.sock
mysqladmin -uroot -S /data/tn/mysql.sock password '123456'
mysqladmin -uroot -S /data/tnslave/mysql.sock password '123456'
mysql -uroot -p123456 -S /data/tn/mysql.sock
mysql -uroot -p123456 -S /data/tnslave/mysql.sock
Bulk-inserting test data into MySQL
DROP PROCEDURE test_insert ;
DELIMITER ;;
CREATE PROCEDURE test_insert ()
BEGIN
DECLARE i INT DEFAULT 0;
WHILE i<10
DO
INSERT INTO CMS.CMS_Module
(
ModuleType,
ModuleName,
ModuleDescription,
DesignHTML,
QUERY
)
VALUES
(
'2',
CONCAT('活动',i),
'测试加入',
'aaaaaaaaaa',
''
);
SET i=i+1;
END WHILE ;
END;;
DELIMITER ;
CALL test_insert();
create database tn default character set utf8;
create table tn_user (id int primary key auto_increment,name varchar(30),cre_date datetime);
create table test_test (id int primary key auto_increment,name varchar(30),cre_date datetime);
vi indata.sh
#!/bin/bash
i=1;
MAX_INSERT_ROW_COUNT=$1;
while [ $i -le $MAX_INSERT_ROW_COUNT ]
do
mysql -uroot -p123456 -S /data/tn/mysql.sock tn -e "insert into tn_user (id,name,cre_date) values (null,concat('chen',$i),NOW());"
mysql -uroot -p123456 -S /data/tn/mysql.sock test -e "insert into test_test (id,name,cre_date) values (null,concat('chen',$i),NOW());"
i=$(($i+1))
sleep 0.05
done
exit 0
sh indata.sh 10000
Export from the master:
mysqldump -uroot -p123456 -S /data/tn/mysql.sock -A --flush-privileges --single-transaction --master-data=2 >tnfull.sql
Import into the slave:
mysql -uroot -p123456 -S /data/tnslave/mysql.sock <tnfull.sql
GRANT REPLICATION SLAVE ON *.* TO 'slave'@'%' IDENTIFIED BY '123456';
flush privileges;
reset slave;
CHANGE MASTER TO
MASTER_HOST='192.168.99.128',
MASTER_USER='slave',
MASTER_PASSWORD='123456',
MASTER_PORT=3308,
MASTER_LOG_FILE='mysql-bin.000002',
MASTER_LOG_POS=1772865,
MASTER_CONNECT_RETRY=10;
start slave;
insert into tn_user (id,name,cre_date) values (null,concat('chen','hahaha'),NOW());
Replication error:
Last_Errno: 1062
Last_Error: Error 'Duplicate entry '15535' for key 'PRIMARY'' on query. Default database: 'tn'. Query: 'insert into tn_user (id,name,cre_date) values (null,concat('chen',14535),NOW())'
Slave:
stop slave;
Master:
/usr/local/mysql56/bin/mysqldump -uroot -p123456 -S /data/tn/mysql.sock tn tn_user --master-data=2 >tn_user2.sql
-- CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.000002', MASTER_LOG_POS=16015722;
Slave:
start slave until MASTER_LOG_FILE='mysql-bin.000002',MASTER_LOG_POS=16015722;
show slave status\G;
Last_Errno: 1062
Last_Error: Error 'Duplicate entry '15535' for key 'PRIMARY'' on query. Default database: 'tn'. Query: 'insert into tn_user (id,name,cre_date) values (null,concat('chen',14535),NOW())'
stop slave;set global SQL_SLAVE_SKIP_COUNTER=1;
Import into the slave:
mysql -uroot -p123456 -S /data/tnslave/mysql.sock tn<tn_user2.sql
start slave;
Master:
mysql> select count(*) from tn.tn_user;
+----------+
| count(*) |
+----------+
| 24644 |
+----------+
Slave:
mysql> select count(*) from tn.tn_user;
+----------+
| count(*) |
+----------+
| 21355 |
+----------+
Repairing inconsistent data on a slave
https://www.percona.com/doc/percona-toolkit/2.2/
Verifying data consistency between MySQL master and slave:
pt-table-checksum --help
GRANT SELECT, PROCESS, SUPER, REPLICATION SLAVE ON *.* TO 'checksums'@'%' IDENTIFIED BY '123456';
flush privileges;
pt-table-checksum --replicate=tn.checksums --host=127.0.0.1 --user=checksums --password=123456 --port=3308 --databases=tn
pt-table-checksum --replicate=tn.checksums -S /data/tn/mysql.sock --user=checksums --password=123456 --databases=tn
Common parameters:
--nocheck-replication-filters: skip the replication-filter check; recommended. Use --databases afterwards to name the databases to check.
--no-check-binlog-format: skip the binlog-format check; with ROW format the tool would otherwise error out.
--replicate-check-only: only print the out-of-sync information.
--replicate=: write the checksum information into the given table; best written straight into the database being checked.
--databases=: databases to check, comma-separated.
--tables=: tables to check, comma-separated.
Diffs cannot be detected because no slaves were found. Please read the --recursion-method documentation for information.
Taken literally, the master cannot find its slaves. Just add to the slave's my.cnf:
report_host=slave_ip
report_port=slave_port
and that's it.
It means no slaves were found, so the check may be ineffective; specify --recursion-method=processlist.
pt-table-checksum --replicate=tn.checksums -S /data/tn/mysql.sock --user=root --password=123456 --databases=tn --recursion-method=processlist
pt-table-sync
Check which storage engines mysqld supports:
SHOW ENGINES;
Check the variable values related to the storage engines you care about:
SHOW VARIABLES LIKE 'have_%';
Check a table's type:
SHOW TABLE STATUS LIKE 'tbl_name';
mysql -uroot -p123456 -e "select user,host,password from user" mysql
shell> TMPDIR=/some_tmp_dir/
shell> MYSQL_UNIX_PORT=/some_tmp_dir/mysql.sock
shell> export TMPDIR MYSQL_UNIX_PORT
Before starting the server, mysql.server changes directory to the MySQL installation directory and then invokes safe_mysqld. If you want to run the server as a specific user,
add the appropriate user option to the [mysqld] group of the /etc/my.cnf option file, as shown later in this section. (If you have a binary distribution installed in a non-standard location,
you may need to edit mysql.server so that it changes into the correct directory before running safe_mysqld. Note that if you modify mysql.server, your changes will be
overwritten the next time you upgrade MySQL, so keep a copy of your edited version that you can reinstall.)
The mysql.server script uses these variables: basedir, datadir and pid-file. If set, they must be placed in an option file, not on the command line.
mysql.server understands only the start and stop command-line arguments.
vi my.cnf
[mysql.server]
basedir=/usr/local/mysql56
vi mysql.server
basedir=/usr/local/mysql56
datadir=/data/tn
mysqld_pid_file_path=/data/cms/mysql_3308.pid
./mysqld --basedir=/usr/local --datadir= --verbose --help
mysqldump --quick db_name | gzip > db_name.contents.gz
mysqladmin create db_name
gunzip < db_name.contents.gz | mysql db_name
First, create a directory to hold the output files and dump the database:
shell> mkdir DUMPDIR
shell>mysqldump --tab=DUMPDIR db_name
Then move the files in the DUMPDIR directory to the corresponding directory on the target machine and load them into MySQL:
shell> mysqladmin create db_name # create database
shell> cat DUMPDIR/*.sql | mysql db_name # create tables in database
shell> mysqlimport db_name DUMPDIR/*.txt # load data into tables
2.12.1.2. Linux binary distribution notes
The MySQL Linux-Intel binary and RPM releases are configured for the highest possible speed; we always try to use the fastest stable compiler available.
The binaries are linked with -static, which means you normally need not worry about system library versions, nor install LinuxThreads.
A statically linked program is slightly larger than a dynamically linked one but also slightly faster (3-5%). One problem, though, is that statically linked programs cannot use user-defined functions (UDFs).
If you are going to write or use UDFs (C or C++ programmers only), you must compile MySQL yourself with dynamic linking.
Alternatively, many Linux distributions (including SuSE Linux 8.0 and later) let you set these startup parameters with the sysctl tool. Put the following values into the file /etc/sysctl.conf:
# Increase some values for MySQL
fs.file-max = 65536
fs.dquot-max = 8192
fs.super-max = 1024
You should also add the following to /etc/my.cnf:
[mysqld_safe]
open-files-limit=8192
This lets the server reach a combined total of 8,192 connections and open files.
The easiest way to install Perl modules on Unix is with the CPAN module. For example:
shell> perl -MCPAN -e shell
cpan> install DBI
cpan> force install DBD::mysql
Installing DBD::mysql runs a series of tests. These tests try to connect to the local MySQL server using the default user name and password.
(The default user name is your login name on Unix, and ODBC on Windows. The default password is "no password".) If you cannot connect to the server with those values
(for example, if your account has a password), the tests fail. You can use force install DBD::mysql to ignore the failed tests.
You can also download the module distribution as a compressed tar file and build the module by hand. For example, to unpack and build the DBI distribution:
1. Unpack the distribution into the current directory:
2. shell> gunzip < DBI-VERSION.tar.gz | tar xvf -
This creates the directory DBI-VERSION.
3. Change into the top-level directory of the unpacked distribution:
4. shell> cd DBI-VERSION
5. Build and compile the distribution:
6. shell> perl Makefile.PL
7. shell> make
8. shell> make test
9. shell> make install
If you decide you do not want to execute a command while you are in the middle of typing it, cancel it with \c:
mysql> SELECT
-> USER()
-> \c
SELECT * FROM pet WHERE birth > '1998-1-1';
The default sort order is ascending, with the smallest values first. To sort in reverse (descending) order, add the DESC keyword to the name of the column you are sorting by:
mysql> SELECT name, birth FROM pet ORDER BY birth DESC;
You can find the animals born in May with:
mysql> SELECT name, birth FROM pet WHERE MONTH(birth) = 5;
SQL pattern matching lets you use "_" to match any single character and "%" to match any number of characters (including zero).
The other kind of pattern matching MySQL provides uses extended regular expressions. When you test for a match with these patterns, use the REGEXP and NOT REGEXP operators (or RLIKE and NOT RLIKE, which are synonyms).
Some characters of extended regular expressions:
· "." matches any single character.
· A character class "[...]" matches any character within the brackets. For example, "[abc]" matches "a", "b" or "c". To name a range of characters, use a "-": "[a-z]" matches any letter, while "[0-9]" matches any digit.
· "*" matches zero or more instances of the thing preceding it. For example, "x*" matches any number of "x" characters, "[0-9]*" matches any number of digits, and ".*" matches any number of anything.
A REGEXP pattern matches if it matches anywhere in the value being tested (unlike a LIKE pattern, which matches only if it matches the entire value).
To anchor a pattern so that it must match the beginning or the end of the value being tested, use "^" at the start or "$" at the end of the pattern.
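For the "$" side, a matching sketch in the same spirit (the pet table comes from the MySQL tutorial these notes quote; 'fy$' finds names ending in "fy"):
mysql> SELECT * FROM pet WHERE name REGEXP 'fy$';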
If you want to force a REGEXP comparison to be case sensitive, use the BINARY keyword to make one of the strings a binary string. This query matches only names whose first letter is a lowercase 'b':
mysql> SELECT * FROM pet WHERE name REGEXP BINARY '^b';
mysql> select user,host from user where user regexp binary '^u';
+----------------+------------+
| user | host |
+----------------+------------+
| un_wu | % |
| usvr_ssis | 10.10.% |
| usvr_backup | 10.10.20.% |
| usvr_checksums | 10.10.20.% |
| usvr_replicate | 10.10.20.% |
| usvr_dbmonitor | localhost |
+----------------+------------+
To find names containing a "w", use this query:
mysql> SELECT * FROM pet WHERE name REGEXP 'w';
select user,host from user where user regexp 'w';
+-----------------+------------+
| user | host |
+-----------------+------------+
| un_wu | % |
| _del_un_owenpan | 10.10.20.% |
+-----------------+------------+
If a query produces a lot of output, you can run it through a pager instead of watching it scroll off the top of the screen:
· shell> mysql < batch-file | more
You can also capture the output in a file for further processing:
· shell> mysql < batch-file > mysql.out
Using user variables
mysql> SELECT @min_price:=MIN(price),@max_price:=MAX(price) FROM shop;
mysql> SELECT * FROM shop WHERE price=@min_price OR price=@max_price;
Using AUTO_INCREMENT
The AUTO_INCREMENT attribute can be used to generate a unique identity for new rows:
CREATE TABLE animals (
id MEDIUMINT NOT NULL AUTO_INCREMENT,
name CHAR(30) NOT NULL,
PRIMARY KEY (id)
);
select LAST_INSERT_ID();
To start the AUTO_INCREMENT sequence at a value other than 1, set that value with CREATE TABLE or ALTER TABLE, like this:
mysql> ALTER TABLE tbl AUTO_INCREMENT = 100;
Multiple SQL statements can be passed this way, separated by semicolons:
shell> mysql -u root -p --execute="SELECT Name FROM Country WHERE Name LIKE 'AU%';SELECT COUNT(*) FROM City" world
Note that the long form (--execute) must be followed immediately by an equals sign (=).
mysql -uroot -p123456 -S /data/cms/mysql.sock -e "select user,host from user;show tables;" mysql
In shell scripts you can use the my_print_defaults program to parse option files. The following example shows the output my_print_defaults produces when asked for the options found in the [client] and [mysql] groups:
shell> ./my_print_defaults --defaults-extra-file=/data/tn/my.cnf client mysql
--port=3308
--socket=/data/tn/mysql.sock
--default-character-set=utf8
--pid-file=/data/cms/mysql_3308.pid
Default options are read from the following files in the given order:
/etc/my.cnf /etc/mysql/my.cnf /usr/local/mysql/etc/my.cnf ~/.my.cnf
The Unix syntax depends on your shell. Suppose you want to specify the TCP/IP port number with the MYSQL_TCP_PORT variable. The typical syntax (for sh, bash, zsh, and so on) is:
MYSQL_TCP_PORT=3306
export MYSQL_TCP_PORT
3315
/usr/databases/weixin3315
mkdir data etc logs
/usr/databases/weixin3315/data
/usr/local/mysql5619/bin/mysqld --defaults-file=/usr/databases/weixin3315/etc/my.cnf --basedir=/usr/local/mysql5619/ --datadir=/usr/databases/weixin3315/data --user=mysql --log-error=/usr/databases/weixin3315/logs/mysql.err --pid-file=/usr/databases/weixin3315/logs/mysql.pid --socket=/usr/databases/weixin3315/mysql.sock --port=3315 &
[client]
#password = your_password
port = 3315
socket = /usr/databases/weixin3315/mysql.sock
default-character-set=utf8
[mysqld]
port = 3315
socket = /usr/databases/weixin3315/mysql.sock
datadir=/usr/databases/weixin3315/data
pid-file=/usr/databases/weixin3315/logs/mysql.pid
default_storage_engine=InnoDB
character_set_server=utf8
max_connections=3000
innodb_lock_wait_timeout=800000000
skip-locking
slave-skip-errors=1062
ft_min_word_len=1
log-error = /usr/databases/weixin3315/logs/mysql.err
chown -R mysql:mysql weixin3315/
Master:
server_id=1
log-bin=mysql-bin
binlog_cache_size = 1M
binlog_format=mixed
expire_logs_days=30
log_bin_trust_function_creators=TRUE
slow_query_log = 1
long_query_time = 1
slow_query_log_file = mysql-slow.log
innodb_file_per_table = 1
innodb_buffer_pool_size = 2G
skip-name-resolve
从:
[client]
#password = your_password
port = 3315
socket = /usr/databases/weixin3315/mysql.sock
default-character-set=utf8
[mysqld]
port = 3315
socket = /usr/databases/weixin3315/mysql.sock
datadir=/usr/databases/weixin3315/data
pid-file=/usr/databases/weixin3315/logs/mysql.pid
default_storage_engine=InnoDB
character_set_server=utf8
max_connections=3000
innodb_lock_wait_timeout=800000000
slave-skip-errors=1062
ft_min_word_len=1
log-error = /usr/databases/weixin3315/logs/mysql.err
server-id=3315
grant replication slave, replication client on *.* to 'usvr_replicate'@'10.10.20.%' identified by 'usvr_replicate@#';
grant replication slave on *.* to 'usvr_replicate'@'10.10.20.%' identified by 'usvr_replicate!@#';
主导出数据:
FLUSH TABLES WITH READ LOCK;
mysqldump -uroot -p --all-databases --default-character-set=utf8 --flush-privileges --single-transaction --flush-logs --triggers --routines --events --hex-blob >/backup/alldb.sql
mysqldump -uroot -p --all-databases --default-character-set=utf8 --master-data=2 --flush-privileges --single-transaction --flush-logs --events --hex-blob >/backup/alldb5.sql
YDtg@612
-- CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.000013', MASTER_LOG_POS=120;
/usr/local/mysql5619/bin/mysql -uroot -S /usr/databases/weixin3315/mysql.sock </root/alldb4.sql
GRANT REPLICATION SLAVE ON *.* TO 'usvr_replicate'@'10.10.20.%' IDENTIFIED BY 'usvr_replicate!@#';
flush privileges;
ERROR 2013 (HY000): Lost connection to MySQL server during query
mysql_upgrade -u root -pYDtg@612
reset slave;
CHANGE MASTER TO
MASTER_HOST='10.10.20.153',
MASTER_USER='usvr_replicate',
MASTER_PASSWORD='usvr_replicate!@#',
MASTER_PORT=3306,
MASTER_LOG_FILE='mysql-bin.000013',
MASTER_LOG_POS=120,
MASTER_CONNECT_RETRY=10;
CHANGE MASTER TO MASTER_PASSWORD='usvr_replicate!@#'
CHANGE MASTER TO MASTER_HOST='10.10.20.153',MASTER_USER='usvr_replicate',MASTER_PASSWORD='usvr_replicate!@#',MASTER_LOG_FILE='mysql-bin.000015',MASTER_LOG_POS=1273;
/usr/local/mysql/bin/mysqld --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data --plugin-dir=/usr/local/mysql/lib/plugin --user=mysql --log-error=/usr/local/mysql/data/sjhqweb153.err --pid-file=/usr/local/mysql/data/sjhqweb153.pid
ERROR 2013 (HY000): Lost connection to MySQL server during query
This happens because the database has not been upgraded yet: the privilege tables are still in the old format (run mysql_upgrade, as above, to rebuild them)
# Generated by Percona Configuration Wizard (http://tools.percona.com/) version REL5-20120208
[mysql]
# CLIENT #
port = 3306
socket = /usr/local/mysql/mysql.sock
[mysqld]
# GENERAL #
user = mysql
default-storage-engine = InnoDB
socket = /usr/local/mysql/mysql.sock
pid-file = /usr/local/mysql/mysql.pid
# MyISAM #
key-buffer-size = 32M
myisam-recover = FORCE,BACKUP
# SAFETY #
max-allowed-packet = 16M
max-connect-errors = 1000000
# DATA STORAGE #
datadir = /usr/local/mysql/
# BINARY LOGGING #
log-bin = /usr/local/mysql/mysql-bin
expire-logs-days = 14
sync-binlog = 1
# CACHES AND LIMITS #
tmp-table-size = 32M
max-heap-table-size = 32M
query-cache-type = 0
query-cache-size = 0
max-connections = 500
thread-cache-size = 50
open-files-limit = 65535
table-definition-cache = 4096
table-open-cache = 16
# INNODB #
innodb-flush-method = O_DIRECT
innodb-log-files-in-group = 2
innodb-log-file-size = 128M
innodb-flush-log-at-trx-commit = 1
innodb-file-per-table = 1
innodb-buffer-pool-size = 2G
# LOGGING #
log-error = /usr/local/mysql/mysql-error.log
log-queries-not-using-indexes = 1
slow-query-log = 1
slow-query-log-file = /usr/local/mysql/mysql-slow.log
----查询缓存----------
http://imysql.cn/tag/query-cache
Waiting for query cache lock is a connection state specific to MySQL 5.5 and later.
It shows up in SHOW PROCESSLIST output when a thread running a query is waiting for access to the query cache, the buffer where MySQL caches SELECT result sets.
A thread may need that access either to search the cache for a matching result before executing the query, or to store the result set into the cache after execution completes.
query_cache_type
0: query cache disabled;
1: query cache enabled; as long as a statement qualifies for caching, the client's query and its result set can be cached for other clients to reuse;
2: query cache enabled, but only for statements that add the SQL_CACHE hint (and otherwise qualify); their query and result set can then be cached for other clients to reuse;
query_cache_size: a commonly recommended setting is 64M;
query_cache_limit
Caps the largest single result set the cache will store, preventing one big result set from occupying a large chunk of cache memory; small result sets are usually the most effective cache entries. The default is 1M; something between 16k and 1024k is a reasonable range, but the most important thing is to size it from analysis of your actual workload;
query_cache_wlock_invalidate
Mainly concerns MyISAM: when one client holds a write lock on a table, this decides whether other clients' queries that have a matching cache entry may read it directly or must wait for the write lock to be released. The default is 0, meaning reading from the cache is allowed;
Defragmenting the query cache
After the query cache has been in use for a while, memory fragmentation generally builds up, so the related status values need monitoring and the fragments need periodic consolidation with: FLUSH QUERY CACHE;
Every table involved in the query cache has its own global lock;
Testing yields a few important conclusions (detailed test steps at the end):
1. To shut the query cache off completely, query_cache_type = 0 must be set from the start; changing query_cache_type from 1 to 0 after startup is not enough;
2. Even with query_cache_size = 0, a non-zero query_cache_type can still cause frequent Waiting for query cache lock in a real environment;
3. With query_cache_type = 0 from the start, the cache cannot be enabled dynamically later; the reverse direction works, but a cache that started enabled and is then "disabled" at runtime still produces Waiting for query cache lock, so it is not truly off;
此外,QC也不适用于下面几个场景:
1、子查询或者外层查询;
2、存储过程、存储函数、触发器、event中调用的SQL,或者引用到这些结果的;
3、包含一些特殊函数时,例如:BENCHMARK()、CURDATE()、CURRENT_TIMESTAMP()、NOW()、RAND()、UUID()等等;
4、读取mysql、INFORMATION_SCHEMA、performance_schema 库数据的;
5、类似SELECT…LOCK IN SHARE MODE、SELECT…FOR UPDATE、SELECT..INTO OUTFILE/DUMPFILE、SELECT..WHERE…IS NULL等语句;
6、SELECT执行计划用到临时表(TEMPORARY TABLE);
7、未引用任何表的查询,例如 SELECT 1+1 这种;
8、产生了 warnings 的查询;
9、SELECT语句里加了 SQL_NO_CACHE 关键字;
最为重要的是,在MySQL里QC是由一个全局锁在控制,每次更新QC的内存块都需要进行锁定。
Waiting for query cache lock
Waiting on query cache mutex
说了这么多废话,其实核心要点就一个:
如果线上环境中99%以上都是只读,很少有更新,再考虑开启QC吧,否则,就别开了。
关闭方法很简单,有两种:
1、同时设置选项 query_cache_type = 0 和 query_cache_size = 0;
2、如果用源码编译MySQL的话,编译时增加参数 --without-query-cache 即可;
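A quick check, assuming the settings above were applied before restart, that the cache is really off:
mysql -uroot -p -e "SHOW GLOBAL VARIABLES LIKE 'query_cache_type'; SHOW GLOBAL VARIABLES LIKE 'query_cache_size';"
# expect query_cache_type = OFF and query_cache_size = 0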
From the above: the first time a SQL statement executes, MySQL must lock the cache, initialize, search the cache for a previously cached copy of the SQL, check user and table privileges, open the tables, lock memory, build the execution plan, execute the statement, bring data from disk into memory to operate on, close the tables, lock and cache the result data, and so on. The working principle is similar to ORACLE's.
------------------MySQL和线程缓存大小-----------
mysqladmin -u root -p -r -i 1 ext | grep Threads_created
YDtg@612
The thread_cache_size parameter defines how many unused threads may be kept alive at any one time.
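To judge whether thread_cache_size is large enough, compare thread creation against total connection attempts; a rough rule of thumb (an assumption, not a hard limit) is to keep the miss rate Threads_created/Connections well under 10%:
mysql -uroot -p -e "SHOW GLOBAL STATUS LIKE 'Threads_created'; SHOW GLOBAL STATUS LIKE 'Connections';"
# miss rate = Threads_created / Connections; if it keeps climbing, raise thread_cache_size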
----未使用的索引----
SELECT TABLE_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.INDEX_STATISTICS WHERE TABLE_SCHEMA='myprod' ORDER BY ROWS_READ DESC;
pt-index-usage
---------show profiles;--------
set profiling=1;
select count(1) as cnt from tran_excution;
show profile source for query 1;
show profile cpu for query 2;
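The query IDs used above come from show profiles, which lists the statements recorded since profiling was enabled:
show profiles;   -- Query_ID, Duration and Query text of the recent statements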
----
http://www.housong.net/mysql-normal-trouble-shooting.html
一、Performance faults
1. Database host LOAD spikes
(1) Typical causes of a sudden LOAD spike on a MySQL server:
– SQL statements doing full table scans;
– SELECT statements whose execution plan has gone wrong;
– UPDATE/DELETE statements with no usable index, blocking other SQL;
– table structure changes or OPTIMIZE statements in progress;
– bulk imports/exports, especially logical database backups;
– business volume beyond what the server can handle (we all watch business growth and the company's traffic patterns closely, and keep in touch with dev and ops, so an unnoticed sudden traffic explosion is rare);
(2) To fix a LOAD spike, first find the real cause. Log in to the database server and run:
SHOW PROCESSLIST;
or
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND <> 'sleep' AND TIME>100;
(3) If a statement stays in the executing state for a long time, examine its execution plan:
mysql>EXPLAIN <SQL statement>
(4) Check I/O request status
with the Linux command iostat:
watch r/s (read requests), w/s (write requests), avgrq-sz (average request size), await (I/O wait), svctm (I/O service time)
2. Request handling fluctuates and many application requests time out
(1) Check the database's response times with tcprstat
tcprstat -l $ip_addr -t $interval -n $count
(2) Analyze the key counters
com_select/insert/update/delete
innodb_rows_read/update/delete/insert
logical reads (Innodb_buffer_pool_read_requests) vs physical reads (Innodb_buffer_pool_reads)
(3) Turn to the slow query log and check suspicious SQL
EXPLAIN <SQL statement>;
If an abnormal SQL statement has been running for a long time, kill its thread.
(4) Pay special attention to how indexes are used
二、Replication faults
1. Replication stopped
(1) the slave cannot connect to the master: either a network problem, or mysqld on the master has stopped;
(2) primary key conflicts;
(3) master and slave data are inconsistent;
(4) other causes;
To get replication going again:
(1) stop slave;
(2) start slave;
(3) check that it is healthy: show slave status\G
For primary key conflicts or inconsistent data, extra handling is needed:
(1) stop the slave threads
STOP SLAVE;
(2) set the transaction ID, taken from Retrieved_Gtid_Set
SET @@SESSION.GTID_NEXT= 'xxxxxxxxxxx'
(3) inject an empty transaction
BEGIN; COMMIT;
(4) restore automatic transaction IDs
SET SESSION GTID_NEXT = AUTOMATIC;
(5) start the slave threads
START SLAVE;
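Put together, the whole skip sequence looks like this (the GTID value is a placeholder; take the real one from Retrieved_Gtid_Set in SHOW SLAVE STATUS):
STOP SLAVE;
SET @@SESSION.GTID_NEXT='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:N';
BEGIN; COMMIT;
SET SESSION GTID_NEXT=AUTOMATIC;
START SLAVE;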
三、Instance faults
1. MySQL instance hangs ("fake dead")
(1) Re-confirm mysql's running state:
SHOW PROCESSLIST; (are large numbers of requests waiting to be handled?)
To verify whether MySQL is really hung, run any CREATE TABLE or data-changing statement in the test database; if nothing comes back after pressing Enter, the instance can generally be judged hung.
----tcpstat:分析响应时间-------------
http://www.360doc.com/content/14/0327/16/1123425_364180294.shtml
下载二进制版本:tcprstat
mv tcprstat-static.v0.3.1.x86_64 tcprstat
chmod +x tcprstat
./tcprstat -p 3306 -t 1 -n 10
pcap: SIOCGIFFLAGS: bonding_masters: No such device
./tcprstat -p 3306 -t 1 -n 10 -l 10.10.20.109
timestamp count max min avg med stddev 95_max 95_avg 95_std 99_max 99_avg 99_std
1431672918 34 248791 34 19915 194 51899 120451 9501 27855 124298 12980 33758
1431672919 10 118340 148 15487 285 35314 29073 4059 8926 29073 4059 8926
1431672920 21 231236 135 21078 254 54004 43790 4966 12432 117054 10570 27269
1431672921 51 116927 75 3411 150 16533 2805 535 813 28442 1141 3994
1431672922 87 117710 47 1827 166 12513 1350 374 374 4730 480 662
1431672923 1066 584260 22 1469 160 24693 784 155 84 2670 196 227
1431672924 1102 563287 26 1165 156 22987 177 142 34 2659 162 181
1431672925 984 1092542 66 2347 138 44492 170 137 19 280 139 23
1431672926 960 246209 60 577 159 8795 200 150 23 1934 175 173
1431672927 21 518918 67 42541 287 115612 126808 10877 31643 167775 18722 46049
-p:端口 3306
-t:时间间隔 1s
-n:打印几次 10次
count: number of requests completed within the interval; the first row means 34 MySQL requests were handled in that second
95_avg: average response time of the fastest 95% of requests in the interval. tcprstat reports times in microseconds, so 9501 is about 9.5 ms (not 9.5 s); this value is the best reflection of MySQL's average query response time
./tcprstat --help
./tcprstat -f '%n\n' -p 3306 -t 1 -n 0 -l 10.10.20.109
./tcprstat -f '%M\t%95M\t%95a\n' -p 3306 -t 1 -n 0 -l 10.10.20.109
oracle:
./tcprstat -p 1521 -t 1 -n 10
http://chenlinux.com/2010/12/28/intro-tcprstat/
统计分析机器的所有ip地址
./tcprstat -p 3306 -t 1 -n 0 -l `/sbin/ifconfig | grep 'addr:[^ ]\+' -o | cut -f 2 -d : | xargs echo | sed -e 's/ /,/g'`
用tcpdump采集网卡数据
tcpdump -s 65535 -x -nn -q -tttt -i bond0 port 3306 -c 50000 -w tcpdump.pcat
使用tcprstat分析由tcpdump采集的数据
./tcprstat -l 10.10.20.109 -p 3306 -t 1 -r tcpdump.pcat
--------------
http://os.51cto.com/art/201011/233915.htm
----------------MySQL性能医生:OrzDBA----------------
https://blog.hackroad.com/operations-engineer/linux_server/8237.html
http://www.freebuf.com/tools/5262.html
http://code.taobao.org/p/orzdba/src/trunk/
http://code.taobao.org/svn/orzdba/trunk
A trunk/orzdba_rt_depend_perl_module.tar.gz
A trunk/orzdba
A trunk/orzdba工具使用说明.pdf
vi orzdba.txt
需要在代码160行左右配置MySQL的相关验证信息,如username,password,host,port,sock等
㈠ 查看MySQL响应时间(rt)
先安装tcprstat
# wget http://github.com/downloads/Lowercases/tcprstat/tcprstat-static.v0.3.1.x86_64
# ln -sf /root/tcprstat-static.v0.3.1.x86_64 /usr/bin/tcprstat
在源码161行左右把 /usr/bin/tcprstat 加入默认读取的位置
cp tcprstat /usr/bin/
chown root:root /usr/bin/tcprstat
chmod u+s /usr/bin/tcprstat
再解压orzdba_rt_depend_perl_module.tar.gz,安装rt所依赖的perl模块
具体做法可参考http://wenku.baidu.com/view/79ca1eb065ce0508763213bf
tar zxvf orzdba_rt_depend_perl_module.tar.gz
cd Perl_Module
安装version模块:
tar zxvf version-0.99.tar.gz
cd version-0.99
perl Makefile.PL
make
make test
make install
安装Class-Data-Inheritable模块:
tar zxvf Class-Data-Inheritable-0.08.tar.gz
cd Class-Data-Inheritable-0.08
perl Makefile.PL
make
make test
make install
安装Module-Build模块:
tar zxvf Module-Build-0.31.tar.gz
cd Module-Build-0.31
perl Build.PL
./Build
./Build test
./Build install
安装File::Lockfile模块:
tar zxvf File-Lockfile-v1.0.5.tar.gz
cd File-Lockfile-v1.0.5
perl Build.PL
./Build
./Build test
./Build install
grep -n -A1 'my $MYSQL' orzdba
cp orzdba.txt orzdba.sh
chmod +x orzdba.sh
vi orzdba.sh
加一行:
my $pass = 'YDtg@612'; # -pass
修改下面行:
my $MYSQL = qq{mysql -s --skip-column-names -uroot -p$pass -P$port };
$MYSQL .= qq{-S$socket } if defined $socket;
my $TCPRSTAT = "/usr/bin/tcprstat --no-header -t 1 -n 0 -p $port -l 10.10.20.109";
#$TCPRSTAT .= " -l $ip";
㈠ 查看MySQL响应时间(rt)
./orzdba.sh -lazy -rt -S /tmp/mysql.sock
/usr/bin/tcprstat --no-header -t 1 -n 0 -p 3306 -l 10.10.20.109
㈡ 监控InnoDB的各项指标
./orzdba.sh -innodb -S /tmp/mysql.sock
㈢ 监控MySQL Server性能
./orzdba.sh -mysql -S /tmp/mysql.sock
./orzdba.sh -help
nohup ./orzdba.sh -lazy -rt -S /tmp/mysql.sock -L /tmp/orzdba.log > /dev/null 2>&1 &
其中,查看DISK(-d)和NET(-n)需要带具体的设备名(具体可以查看/proc/diskstats和/proc/net/dev中的设备或者可以取自iostat和sar -n DEV)。
[root@sjOrderDBm109 ch]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol00
525G 67G 431G 14% /
/dev/cciss/c0d0p1 97M 20M 72M 22% /boot
tmpfs 7.9G 0 7.9G 0% /dev/shm
vgdisplay
lvdisplay
--- Logical volume ---
LV Name /dev/VolGroup00/LogVol00
查看挂载
cat /etc/fstab
df -Th
[root@sjOrderDBm109 ch]# cat /etc/fstab
/dev/VolGroup00/LogVol00 / ext3 defaults 1 1
./orzdba.sh -d /dev/VolGroup00/LogVol01
mysql数据库层参数:
show global status where Variable_name in('xxxx')或者show global status like 'xxx%'
The collected values are cumulative totals; to get a per-second rate, take two samples and divide the difference by the interval. Taking TPS as the example:
TPS = (Com_commit_now - Com_commit_prev) / interval; see the sampling sketch after the list below.
以下是主要的监控指标:
1)Sql语句执行次数QPS------(Com_select、Com_insert、Com_update、Com_delete)
2)事务TPS----(Com_commit、Com_rollback)
3)Row的状态(每秒增加修改删除的行数)-----(Innodb_rows_inserted、Innodb_rows_updated、Innodb_rows_deleted、Innodb_rows_read)
4)Page 状态(buffer pool中的总页数、free、dirty、每秒flushed的page数量)----(Innodb_buffer_pool_pages_flushed<page flush的数量>、Innodb_buffer_pool_pages_data<总的量>、Innodb_buffer_pool_pages_free<free的页数量>、Innodb_buffer_pool_pages_dirty<脏页数量>)
5)Buffer Pool命中率(这里实际是InnoDB缓冲池命中率,并非query cache)-----(Innodb_buffer_pool_read_requests<逻辑读总次数>、Innodb_buffer_pool_reads<物理读总次数>)
6)Data状态(内存buffer中数据每秒读、写次数,每秒读写大小M)----(Innodb_data_reads<数据读总次数>、Innodb_data_writes<数据写的总次数>、Innodb_data_read<至此已经读的数据量>、Innodb_data_written<至此已经写的数据量>)
7)Log(log_fsync每秒日志sync到磁盘的次数,log每秒写的大小M)-----(Innodb_os_log_fsyncs<向日志文件写的总次数>、Innodb_os_log_written<写入日志文件的字节数>)
8)线程状态(每秒创建的线程数量)------(Threads_running<激活状态的线程数>、Threads_connected<当前打开的连接数量>、Threads_created<新建线程的数量>、Threads_cached<线程的缓存值>)
9)每秒从客户端接受或者发送的数据量M---(Bytes_received、Bytes_sent)
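A minimal sampling sketch for these counters, reusing mysqladmin's relative mode from earlier (-r prints per-interval deltas, -i 1 samples every second); the egrep list is an assumption, trim it to the counters you care about:
mysqladmin -uroot -p -r -i 1 ext | egrep 'Com_select|Com_insert|Com_update|Com_delete|Com_commit|Com_rollback|Innodb_rows|Threads_created|Bytes_received|Bytes_sent'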
---清空表故障处理--------------------------------
shop company
mysql-bin.000110
mysql -uroot -pYDtg@612 -e "show binlog events in 'mysql-bin.000110';"
mysql-bin.000110 576477657 Query 20118 576477745 use `NEWMALL`; TRUNCATE TABLE company
mysql-bin.000110 576478865 Query 20118 576478950 use `NEWMALL`; TRUNCATE TABLE shop
mysqlbinlog --no-defaults mysql-bin.000110 |grep --ignore-case TRUNCATE -A3 -B4
mysqlbinlog --no-defaults mysql-bin.000110 >/tmp/bin110.sql
mysqlbinlog --no-defaults -d NEWMALL mysql-bin.000110 >/tmp/newmall.sql
mysqlbinlog --no-defaults -d NEWMALL mysql-bin.000110 --start-datetime="2015-05-18 06:20:00" --stop-datetime="2015-05-18 16:24:17" -r /tmp/newmall2.sql
more /tmp/newmall2.sql |grep -i "update shop " >/root/newmall_shop.sql
more /root/newmall_shop.sql |grep -v -i "update shop set tg_mark" >/root/newmall_shop2.sql
mysql -uroot -pYDtg@612 NEWMALL </root/newmall_shop2.sql
more /tmp/newmall2.sql |grep -i "update company " >/root/newmall_company.sql
mysql -uroot -pYDtg@612 NEWMALL </root/newmall_company.sql
--default-character-set=utf8
egrep -v "#|\*|--|^$" /tmp/newmall2.sql
mysqlbinlog --no-defaults -d NEWMALL --base64-output=decode-rows -v mysql-bin.000110 >/tmp/newmall3.sql
mysqlbinlog --no-defaults -d NEWMALL --base64-output=decode-rows -v mysql-bin.000110 --start-datetime="2015-05-18 06:20:00" --stop-datetime="2015-05-18 16:24:17">/tmp/newmall4.sql
more /tmp/newmall4.sql |egrep -i "###" |sed -e 's/###/ /g' |sed -e 's/@1/id/g' |sed -e 's/@2/val/g' |sed -e 's/@3/data/g' >/tmp/newmall4_1.sql
more /newmall_update.sql |grep -i "update shop " |more
more /tmp/newmall2.sql |grep --ignore-case -E 'insert|update|delete' -A2 -B2|grep -w '\bcompany\b'
-v 反向选择,即找 没有搜索字符串的行
-w, --word-regexp 强制 PATTERN 仅完全匹配字词(匹配一行中的某个完整单词:company)同'\bcompany\b'
more /tmp/newmall2.sql |grep --ignore-case -E 'insert|update|delete' -A2 -B2|grep -wi '\bcompany\b'
mysql> show master logs;
+------------------+------------+
| Log_name | File_size |
+------------------+------------+
| mysql-bin.000107 | 1073742034 |
| mysql-bin.000108 | 1073742075 |
| mysql-bin.000109 | 1073742327 |
| mysql-bin.000110 | 607103626 |
+------------------+------------+
LOAD DATA LOCAL INFILE '/root/shop.txt' INTO TABLE NEWMALL.shop;
LOAD DATA LOCAL INFILE '/root/company.txt' INTO TABLE NEWMALL.company;
use NEWMALL/*!*/;
SET TIMESTAMP=1431937457/*!*/;
insert into cart(user_id,shop_id,item_id,product_count,single_price,add_time) values(3088643,5775,184702,1,1096,'2015-05-18 16:24:17')
/*!*/;
# at 576477630
#150518 16:24:17 server id 20118 end_log_pos 576477657 Xid = 5329191322
COMMIT/*!*/;
# at 576477657
#150518 16:24:18 server id 20118 end_log_pos 576477745 Query thread_id=144096493 exec_time=0 error_code=0
SET TIMESTAMP=1431937458/*!*/;
TRUNCATE TABLE company
/*!*/;
# at 576477745
#150518 16:24:18 server id 20118 end_log_pos 576477811 Query thread_id=144007224 exec_time=0 error_code=0
SET TIMESTAMP=1431937458/*!*/;
BEGIN
/*!*/;
# at 576477811
# at 576478838
#150518 16:24:21 server id 20118 end_log_pos 576478865 Xid = 5329193863
COMMIT/*!*/;
# at 576478865
#150518 16:24:21 server id 20118 end_log_pos 576478950 Query thread_id=144096508 exec_time=0 error_code=0
use NEWMALL/*!*/;
SET TIMESTAMP=1431937461/*!*/;
TRUNCATE TABLE shop
/*!*/;
# at 576478950
#150518 16:24:21 server id 20118 end_log_pos 576479016 Query thread_id=144007224 exec_time=0 error_code=0
SET TIMESTAMP=1431937461/*!*/;
SET @@session.autocommit=1/*!*/;
BEGIN
/*!*/;
# at 576479016
查看mysql的历史记录:
more .mysql_history
mysqldump -uroot -p123456 test -T /root/tmp
(Errcode: 13) when executing 'SELECT INTO OUTFILE'
解决:
vi /etc/selinux/config
getenforce
setenforce 0
getsebool -a
setsebool -P mysqld_disable_trans=1
mysqldump -uroot -p123456 test -T /root/tmp
chmod -R 777 /usr/backup/dump/weixin/table/
/usr/local/mysql5619/bin/mysqldump -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.155 -P 3315 weixin -T /usr/backup/dump/weixin/table/`date +%F`
表分离备份:
vi weixin_tab.sh
#!/bin/bash
DATABASE=`/usr/local/mysql5619/bin/mysql -uusvr_backup -p'B1@k&6agU#p' -P 3315 -h10.10.20.155 -e "show databases"|sed '1d'|egrep -v "mysql|test|ConfigDB|performance_schema|information_schema"`
for database in $DATABASE
do [ ! -d /usr/backup/dump/weixin/table/`date +%F` ] && mkdir -p /usr/backup/dump/weixin/table/`date +%F` && chmod -R 777 /usr/backup/dump/weixin/table/`date +%F`
/usr/local/mysql5619/bin/mysqldump -uusvr_backup -p'B1@k&6agU#p' -h10.10.20.155 -P 3315 --skip-tz-utc --opt --default-character-set=utf8 ${database} -T /usr/backup/dump/weixin/table/`date +%F`
done
mysqlbinlog --no-defaults -d NEWMALL mysql-bin.000110 |grep --ignore-case "TRUNCATE TABLE SHOP" -A3 -B4 |more
COMMIT/*!*/;
# at 576478865
#150518 16:24:21 server id 20118 end_log_pos 576478950 Query thread_id=144096508 exec_time=0 error_code=0
SET TIMESTAMP=1431937461/*!*/;
TRUNCATE TABLE shop
/*!*/;
# at 576478950
# at 576479016
SELECT from_unixtime('1431937461');
2015-05-18 16:24:21
mysqlbinlog --no-defaults -d NEWMALL mysql-bin.000110 --start-datetime="2015-05-18 06:20:00" --stop-datetime="2015-05-18 16:24:17" -r /tmp/newmall2.sql
除了GRANT,你可以直接用INSERT语句创建相同的账户,然后使用FLUSH PRIVILEGES告诉服务器重载授权表:
shell> mysql --user=root mysql
mysql> INSERT INTO user
-> VALUES('localhost','monty',PASSWORD('some_pass'),
-> 'Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y');
INSERT INTO user SET Host='localhost',User='admin',
-> Reload_priv='Y', Process_priv='Y';
INSERT INTO user (Host,User,Password)
-> VALUES('localhost','dummy','');
FLUSH PRIVILEGES;
MySQL binary logs matter for recovery because they are the incremental backups. If the log is flushed (rotated) when the full backup is made, the binlog files created afterwards contain all data changes since that backup.
Let's modify the earlier mysqldump command slightly so that it rotates the binary log at full-backup time and the dump file records the new, current binlog name:
shell> mysqldump --single-transaction --flush-logs --master-data=2
--all-databases > backup_sunday_1_PM.sql
mysql < backup_sunday_1_PM.sql
mysqlbinlog gbichot2-bin.000007 gbichot2-bin.000008 | mysql
5.9.3.2. Specifying recovery positions
Instead of dates and times, mysqlbinlog's --start-position and --stop-position options can name byte positions in the log directly. They work like the start/stop datetime options, except that you give positions counted from the start of the log. Positions are the more precise recovery method, especially when many transactions committed at the same time as the damaging SQL statement. To find the position, run mysqlbinlog over the time range in which the unwanted transaction executed, redirecting the output to a text file for inspection:
mysqlbinlog --start-datetime="2005-04-20 9:55:00" --stop-datetime="2005-04-20 10:05:00" \
/var/log/mysql/bin.123456 > /tmp/mysql_restore.sql
This creates a small text file in /tmp showing the SQL statements around the time the bad statement ran. Open it in a text editor and find the statement you do not want to repeat; note down the binary log positions for stopping and resuming recovery. Positions are marked in the output as log_pos followed by a number. After restoring the earlier backup file, replay around the bad statement from the command line:
mysqlbinlog --stop-position="368312" /var/log/mysql/bin.123456 \
| mysql -u root -pmypwd
mysqlbinlog --start-position="368315" /var/log/mysql/bin.123456 \
| mysql -u root -pmypwd
----------------------------------
Setting up SSH trust
1. Generate a public/private key pair on machine A
ssh-keygen -t rsa
2. Copy machine A's public key to machine B's .ssh directory, to be added to the authorized_keys file
scp .ssh/id_rsa.pub root@10.10.40.108:/root/.ssh/
3. Log in to machine B and append machine A's public key to B's authorization file:
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
You can now SSH from machine A straight into machine B:
ssh 10.10.40.108
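Where it is available, ssh-copy-id collapses steps 2 and 3 into one command; it appends the key and fixes permissions on the remote side:
ssh-copy-id root@10.10.40.108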
------------------------
远程无密码拷贝115上的备份
115上只保留3天的备份
远程存储主机:10.10.40.108
Xtrabackup:
scpxtrbackup.sh
scp /var/backup/Xtrabackup/qeekapay/*.tar.gz 10.10.40.108:/backup/Xtrabackup/qeekapay
scp /var/backup/Xtrabackup/newmall/*.tar.gz 10.10.40.108:/backup/Xtrabackup/newmall
scp /var/backup/Xtrabackup/orders/*.tar.gz 10.10.40.108:/backup/Xtrabackup/orders
scp /var/backup/Xtrabackup/mobile/*.tar.gz 10.10.40.108:/backup/Xtrabackup/mobile
scp /var/backup/Xtrabackup/cms/*.tar.gz 10.10.40.108:/backup/Xtrabackup/cms
scp /var/backup/Xtrabackup/service/*.tar.gz 10.10.40.108:/backup/Xtrabackup/service
scp /var/backup/Xtrabackup/newdecoration/*.tar.gz 10.10.40.108:/backup/Xtrabackup/newdecoration
scp /var/backup/Xtrabackup/phpcms/*.tar.gz 10.10.40.108:/backup/Xtrabackup/phpcms
scp /var/backup/Xtrabackup/guide/*.tar.gz 10.10.40.108:/backup/Xtrabackup/guide
mysqlbackup_single:
scpmysqldump.sh
scp /var/backup/mysqlbackup_single/newmall/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/newmall
scp /var/backup/mysqlbackup_single/orders/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/orders
scp /var/backup/mysqlbackup_single/newdecoration/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/newdecoration
scp /var/backup/mysqlbackup_single/cms/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/cms
scp /var/backup/mysqlbackup_single/config/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/config
scp /var/backup/mysqlbackup_single/guide/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/guide
scp /var/backup/mysqlbackup_single/mobile/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/mobile
scp /var/backup/mysqlbackup_single/phpcms/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/phpcms
scp /var/backup/mysqlbackup_single/qeekapay/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/qeekapay
scp /var/backup/mysqlbackup_single/service/*.tar.gz 10.10.40.108:/backup/mysqlbackup_single/service
mongodb_bak:
scpmongo.sh
scp /var/backup/mongodb_bak/sj330actDB119/*.tar.gz 10.10.40.108:/backup/mongodb_bak/sj330actDB119
scp /var/backup/mongodb_bak/sjBBSdbm132/*.tar.gz 10.10.40.108:/backup/mongodb_bak/sjBBSdbm132
scp /var/backup/mongodb_bak/sjNmallimgDB122/*.tar.gz 10.10.40.108:/backup/mongodb_bak/sjNmallimgDB122
scp /var/backup/mongodb_bak/sjUserCenter148/*.tar.gz 10.10.40.108:/backup/mongodb_bak/sjUserCenter148
scp /var/backup/mongodb_bak/zxdb120/*.tar.gz 10.10.40.108:/backup/mongodb_bak/zxdb120
查找前一天的文件:
find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime -1
find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime -2
[root@localhost Xtrabackup]# date
Wed May 27 16:07:17 CST 2015
[root@localhost Xtrabackup]# find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime -1
/var/backup/Xtrabackup/qeekapay/2015-05-26.tar.gz
[root@localhost Xtrabackup]# find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime -2
/var/backup/Xtrabackup/qeekapay/2015-05-26.tar.gz
/var/backup/Xtrabackup/qeekapay/2015-05-25.tar.gz
rsync is a remote synchronization program. Compared with scp it backs up at minimal cost, transferring only the files that differ, which saves a lot of time on every run. Besides its own protocol it can also transfer over ssh; just add the ssh option. Common forms:
rsync -avH -e ssh /path/to/source user@des:/path/to/local ## push: local to remote
rsync -avH -e ssh user@des:/path/to/source /path/to/local ## pull: remote to local
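For the backup directories above, a per-directory one-liner would look like this (a sketch; repeat per database directory):
rsync -avH -e ssh /var/backup/Xtrabackup/qeekapay/ root@10.10.40.108:/backup/Xtrabackup/qeekapay/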
[root@localhost sj330actDB119]# more /home/mysql/rmbackup.sh
find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/newmall -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/orders -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/mobile -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/cms -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/service -name "*.tar.gz" -mtime +7 -exec rm {} \;
find /var/backup/Xtrabackup/newdecoration -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/newmall -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/qeekapay -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/cms -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/mobile -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/newdecoration -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/orders -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/phpcms -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mysqlbackup_single/service -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mongodb_bak/sj330actDB119 -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mongodb_bak/sjNmallimgDB122 -name "*.tar.gz" -mtime +14 -exec rm {} \;
find /var/backup/mongodb_bak/sjUserCenter148 -name "*.tar.gz" -mtime +14 -exec rm {} \;
非交互远程执行命令:
ssh root@10.10.40.108 'ls -l /backup/Xtrabackup/qeekapay/*'
find /var/backup/Xtrabackup/qeekapay -name "*.tar.gz" -mtime -1
scp /var/backup/Xtrabackup/qeekapay/*.tar.gz 10.10.40.108:/backup/Xtrabackup/qeekapay
-------------------
远程增量备份文件:ok
xscp.sh
ssh root@10.10.40.108 'ls -l /backup/Xtrabackup/qeekapay/*.tar.gz' |awk '{print $9}' |cut -d / -f 5 >/tmp/xqeekapay_remote.txt
ls -l /var/backup/Xtrabackup/qeekapay/*.tar.gz |awk '{print $9}' |cut -d / -f 6 >/tmp/xqeekapay_local.txt
comm -23 /tmp/xqeekapay_local.txt /tmp/xqeekapay_remote.txt >/tmp/xqeekapay_diff.txt
cat /tmp/xqeekapay_diff.txt | while read LINE
do
echo $LINE
# only copy files that still exist locally
if [ -f "/var/backup/Xtrabackup/qeekapay/$LINE" ]; then
scp /var/backup/Xtrabackup/qeekapay/$LINE 10.10.40.108:/backup/Xtrabackup/qeekapay
fi
done
-------------------
newmall从库同步故障 :
Last_Errno: 1060
Last_Error: Error 'Duplicate column name 'app_mobile_check'' on query. Default database: 'uc'. Query: 'ALTER TABLE application ADD COLUMN app_mobile_check TINYINT(1) NOT NULL DEFAULT '1' COMMENT '渠道用户手机验证标识:1:需要强制绑定手机;2:无需强制绑定手机''
Skip_Counter: 0
手动的解决方法是告诉从库slave_sql线程让他忽略掉这个错误继续执行:
mysql>set global sql_slave_skip_counter=1;
mysql>start slave;
--------------------
mysql -h10.10.20.115 -P3309 -uusvr_ssis -p'Ss@123&Ui#Bi'
SELECT Source_HostName,Source_IPAddress,Source_Port,Source_DBname,Source_Table,Target_HostName,Target_IPAddress,Target_Port,Target_DBname,Target_Table,IsWhere,IsSynchro,synType,Schedule,
IsFirst,id,JoinCondition,IsEncryption,EncryptionColumn from ConfigDB.ServerList_Publish where Schedule='day' and IsSynchro=1 and TargetType=2
order by Source_IPAddress;
mysql> "SELECT Source_HostName,Source_IPAddress,Source_Port,Source_DBname,Source_Table,Target_HostName,Target_IPAddress,
20.114
mysqldump -uadmin -p -h10.10.20.114 --skip-tz-utc --opt --default-character-set=utf8 analytics actions_sign_up -T /usr/backup/analytics
mysqldump -uroot -p111111 -h10.10.20.114 analytics actions_sign_up >/usr/backup/analytics/a.sql
mysqldump -uadmin -p -h10.10.20.114 --skip-tz-utc --opt --default-character-set=utf8 analytics actions_sign_up -T /usr/backup/analytics
mysqldump -uadmin -p -h10.10.20.114 --skip-tz-utc --opt --default-character-set=utf8 analytics actions -T /usr/backup/analytics
admin123
mysqldump -uroot -p111111 -h10.10.20.114 --skip-tz-utc --opt --default-character-set=utf8 analytics actions_sign_up -T /usr/backup/analytics
/usr/bin/mysqldump -h$Source_IPAddress -P$Source_Port -uusvr_ssis -p'Ss@123&Ui#Bi' --opt $Source_DBname $Source_Table
--where="${IsWhere}" >/data/DataSync/${Source_DBname}/${Source_Table}_tmp.sql;
mysqldump -uadmin -padmin123 -h10.10.20.114 --skip-tz-utc --opt --default-character-set=utf8 analytics actions_sign_up -T /usr/backup/analytics/1 --fields-terminated-by=',' --lines-terminated-by='\n' --fields-optionally-enclosed-by='"' --fields-escaped-by='\\'
mysqldump -uadmin -padmin123 -h10.10.20.114 --opt --default-character-set=utf8 analytics actions_sign_up -T /usr/backup/analytics/1 --fields-terminated-by=',' --lines-terminated-by='\n' --fields-optionally-enclosed-by='"' --fields-escaped-by='\\'
mysqldump -uadmin -padmin123 -h10.10.20.114 --default-character-set=utf8 --opt -t analytics actions_sign_up >/usr/backup/analytics/2/up.sql
le >/data/DataSync/${Source_DBname}/${Target_Table}.sql;
mysqldump -uadmin -padmin123 -h10.10.20.114 --opt --default-character-set=utf8 ${db} -T ${dir}/${db} --fields-terminated-by=',' --lines-terminated-by='\n' --fields-optionally-enclosed-by='"' --fields-escaped-by='\\'
select * from driver into outfile 'a.txt';
mysql> select count(*) from actions_sign_up;
+----------+
| count(*) |
+----------+
| 8005001 |
+----------+
mysqldump 导出 csv 格式 --fields-terminated-by=, :字段分割符;
/Chunbai/app/mysql/bin/mysqldump -u xueyu2gm -p lszm_reku0 cdkey_info -t -T /data/mysql/data/ --fields-terminated-by=','
1、lszm_reku0 :导出的数据库;
2、cdkey_info:导出的数据表;
3、-t :不导出create 信息;
4、-T 指定导出的位置,注意目录权限;
5、--fields-enclosed-by=\" :字段引号;
6、--fields-terminated-by=, :字段分割符;
或者
mysql> select key_info,class,type
-> into outfile './mysqldump_bak/key_info.txt'
-> fields terminated by ','
-> lines terminated by '\n'
-> from lszm_reku0.cdkey_info limit 100;
Query OK, 100 rows affected (0.00 sec)
-------------------
The following mysqld options affect what is written to the binary log. See also the discussion that follows the option.
· --binlog-do-db=db_name
Tells the master to log updates to the binary log when the current database (the one selected with USE) is db_name; all databases not explicitly named are ignored. If you use this option, make sure you only ever update the current database.
在同一台机器上运行多个MySQL服务器
至少下面的选项对每个服务器必须是不同的:
· --port=port_num
--port控制着TCP/IP连接的端口号。
--socket=path
--socket控制Unix中的Unix套接字文件路径和在Windows中的命名管道名称。
--pid-file=path
该选项只在Unix中使用。它指出服务器在其中写入进程ID的文件名
-------------
Linux bash实现
#!/bin/bash
for ((i=0;i<100;i++));do
{
#并发测试代码可放这儿
echo $i>>aa && echo $i
}&
done
wait
cat aa|wc -l
rm aa
------------------
http://hatemysql.com/tag/master_log_file%EF%BC%8Cread_master_log_pos%EF%BC%9Brelay_log_file%EF%BC%8Crelay_log_pos%EF%BC%9Brelay_master_log_file%EF%BC%8Cexec_master_log_pos/
The master.info file is updated by the I/O thread. Its lines map to the columns shown by SHOW SLAVE STATUS as follows, in file order:
Master_Log_File IO thread读取到的binlog日志文件
Read_Master_Log_Pos IO thread读取到的binlog日志文件位置
Master_Host
Master_User
密码(不由SHOW SLAVE STATUS显示)
Master_Port
Connect_Retry
Master_SSL_Allowed
Master_SSL_CA_File
Master_SSL_CA_Path
Master_SSL_Cert
Master_SSL_Cipher
Master_SSL_Key
The relay-log.info file is updated by the SQL thread. Its lines map to the SHOW SLAVE STATUS columns as follows:
Relay_Log_File slave 在本地缓存的relay 日志的文件名
Relay_Log_Pos slave 在本地缓存的relay 日志的文件位置
Relay_Master_Log_File SQL thread 执行到master的binlog文件名
Exec_Master_Log_Pos SQL thread 执行到master的binlog文件位置
When backing up a slave, also back up these two small files together with the relay log files; they are needed to resume replication after the slave's data is restored.
If the relay logs are lost but relay-log.info is still there, you can check it to see how far into the master's binary log the SQL thread executed, then run CHANGE MASTER TO with the MASTER_LOG_FILE and MASTER_LOG_POS options to tell the slave to re-read the binary log from that point. This of course requires the binary log to still exist on the master.
If the relay log is corrupted, relay-log.info (Relay_Master_Log_File, Exec_Master_Log_Pos) tells you where the SQL thread stopped; set master_log_file and master_log_pos to those values to fetch the binlog into a fresh relay log, as sketched below.
Master_Log_File,Read_Master_Log_Pos 记录了IO thread读到的当前master binlog文件和位置,对应master的binlog文件和位置。
Relay_Log_File,Relay_Log_Pos记录了SQL thread执行到relay log的那个文件和位置,对应的是slave上的relay log文件和位置。
Relay_Master_Log_File,Exec_Master_Log_Pos记录的是SQL thread执行到master binlog的文件和位置,对应的master上binlog的文件和位置。
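A sketch of that re-point, with the file name and position standing in for the Relay_Master_Log_File / Exec_Master_Log_Pos values read from SHOW SLAVE STATUS or relay-log.info:
STOP SLAVE;
CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.000015', MASTER_LOG_POS=1273;
START SLAVE;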
-----------
在NUMA处理器绑定多实例到固定核心
首先编写自定义启动脚本如下:
#!/bin/sh
# Program Path
NUMACTL=`which numactl`
MYSQLD=/usr/alibaba/mysql/libexec/mysqld
PS=`which ps`
GREP=`which grep`
CUT=`which cut`
WC=`which wc`
EXPR=`which expr`
# Variables
CPU_BIND=(`$NUMACTL --show | $GREP nodebind | $CUT -d: -f2 `) # CPU bind list
CPU_BIND_NUM=${#CPU_BIND[@]} # How many CPU binds
MYSQLD_NUM=`$PS aux | $GREP mysqld | $GREP -v grep | $GREP '\<mysqld\>' | $WC -l`
MYSQLD_NUM=`$EXPR $MYSQLD_NUM + 1`
BIND_NO=`$EXPR $MYSQLD_NUM % $CPU_BIND_NUM ` # Calc Which CPU to Bind
# echo CMD
echo "$NUMACTL --cpunodebind=$BIND_NO --localalloc $MYSQLD" > /tmp/mysqld.$MYSQLD_NUM
# use exec to avoid having an extra shell around.
exec $NUMACTL --cpubind=$BIND_NO --localalloc $MYSQLD "$@"
-----------
A faster DROP TABLE for big tables in MySQL: hard-link the .ibd file first, so DROP TABLE only has to unlink one directory entry instead of deleting the huge file itself; the slow disk reclamation is deferred to the later rm.
ln test_test.ibd test_test.id.hdlk
drop table test_test;
rm test_test.id.hdlk
---------------
3. Bulk-loading out-of-order data into InnoDB is slow; how to fix it?
Because of InnoDB's clustered primary key index, inserts get slower and slower when there is no primary key or the key values arrive out of order. How can data be migrated into InnoDB quickly?
Borrowing MyISAM's strength is reliable: shrink the InnoDB buffer pool to free up memory, create a MyISAM table with no indexes at all, and just insert; with concurrent_insert=2, inserts append concurrently at the end of the file and are plenty fast. When the load finishes, use ALTER TABLE to add the indexes, remembering ENGINE=InnoDB in the same statement to convert the table to InnoDB. This is far faster than inserting unordered data straight into InnoDB.
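A minimal sketch of that path, assuming a hypothetical table t_import with an integer id and a CSV file /tmp/t.csv:
SET GLOBAL concurrent_insert=2;
CREATE TABLE t_import (id INT, val VARCHAR(64)) ENGINE=MyISAM;    -- deliberately no indexes
LOAD DATA INFILE '/tmp/t.csv' INTO TABLE t_import FIELDS TERMINATED BY ',';
ALTER TABLE t_import ADD PRIMARY KEY (id), ENGINE=InnoDB;         -- add index and convert in one pass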
---
自动生成索引的脚本
./AutoCreateIndex.pl -e "你的SQL"
./AutoCreateIndex.pl -e "SELECT * FROM foo1 a,foo2 b WHERE 'fdsfsa'=a.col1 AND b.col2 = 'abc' AND a.col1=b.col2 AND a.col3 >1 ORDER BY b.col3,b.col1"
ALTER TABLE foo1 ADD INDEX idx (col1,col3);
ALTER TABLE foo2 ADD INDEX idx (col2,col3,col1);
./AutoCreateIndex.pl -e "SELECT * FROM t1 where t1 =1 and t2=2"
ALTER TABLE t1 ADD INDEX idx (t2,t1);
./AutoCreateIndex.pl -e "SELECT * FROM t1 where col1=1 and col2=4"
ALTER TABLE t1 ADD INDEX idx (col1,col2);
./AutoCreateIndex.pl -e "SELECT * FROM t1 where col1>1 and col2=4"
ALTER TABLE t1 ADD INDEX idx (col2,col1);
perl -MCPAN -e shell
install DBI
force install DBD::mysql
----------------
MyRndTest随机测试脚本
MyRndTest脚本用于生成随机测试SQL并发送到数据库执行。
目前支持生成随机主键查询SQL,类似:SELECT * FROM table WHERE ID = ?。
支持多线程并发测试,将继续改进支持自定义格式SQL随机填充条件进行测试。
用法:myrndtest.pl -u 用户名 -p 密码 -h 数据库主机 -d 目标数据库 -t 要测试的表 -c 每线程执行SQL次数 -n 线程数 -m 测试模式,暂时只支持0,默认0
猛击下载:
MyRndTest.pl
----------------
利用mysqlbinlog进行集群备份的设想
怎么做呢,首先了解下MySQL Replication怎么做的,首先一个Slave IO线程从Master读取binlog,然后解析到Relay-log,另一个Slave SQL线程异步的从Relay-log中读取SQL应用到本地。
mysqlbinlog有一个参数read-from-remote-server,可以从远程读取binlog,只要创建一个有Replication Client权限的用户即可,这就模拟了Slave IO线程的作用。
mysqlbinlog --read-from-remote-server -u repl -p -h target_node --start-datetime='2010-09-01 00:00:00' --stop-datetime='2010-09-01 23:59:59'
通过这条命令就可以获取到2010-09-01这一天的全部SQL,这些SQL可以直接导入到数据库,也可以写到Relaylog,让SLave SQL线程去执行。
--------------
利用临时表清除数据库重复数据
模拟数据:
create table t1 (id int,name varchar(20));
mysql> select * from tb1;
+------+-------+
| id | name |
+------+-------+
| 1 | chen |
| 1 | chen2 |
| 2 | hao |
| 3 | an |
| 3 | wang |
| 4 | haha |
+------+-------+
alter table t1 rename to tb1;
首先查看重复数据的数量:
SELECT id,count(id) FROM tb1 GROUP BY id HAVING count(id)> 1;
然后创建一张临时表,把上述结果存下来,这就是存在重复的数据各选出一条:
CREATE TABLE tb1_tmp_1 SELECT * FROM tb1 GROUP BY id HAVING count(id)> 1;
然后通过上述临时表与原表关联,获取全部存在重复的数据:
CREATE TABLE tb1_tmp_2 SELECT b.* FROM tb1_tmp_1 a, tb1 b WHERE a.id = b.id;
验证一下有没有选错,即有没有不重复的数据被选出来了:
SELECT b.* FROM tb1_tmp_2 a, tb1 b WHERE a.id= b.id;
全量删除所有重复数据:
delete from tb1 where tb1.id in (select id from tb1_tmp_2);
将原重复数据中的一条都插入数据库中:
insert into tb1 select * from tb1_tmp_1;
都做完就可以加上唯一索引了:
ALTER TABLE tbl ADD UNIQUE uk_tbl_c1_c2 (`c1`,`c2`) ;
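Before MySQL 5.7 (which removed the syntax), ALTER IGNORE could do the whole cleanup in one statement by silently dropping rows that violate the new unique key; behavior depends on version and storage engine, so treat this as an alternative sketch:
ALTER IGNORE TABLE tb1 ADD UNIQUE uk_tb1_id (id);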
-----------------
update mysql.user t set t.grant_priv='Y' where t.user='chenh';
select Grant_priv from mysql.user where user='chenh';
------------------
mysql 导出txt文件
字段之间的分隔和记录(行)之间的分隔默认是\t(即Tab)和\n。但可以改变,如:
FIELDS TERMINATED BY ',' --字段用,进行分隔
LINES TERMINATED BY ';' --记录用; 进行分隔
user表是utf8编码,excel默认编码格式是GBK,excel直接打开时候是一堆乱码,导出时候加上CHARACTER SET gbk即可
select * from R_JOB into outfile '/tmp/r_job_test.txt' character set utf8 FIELDS TERMINATED BY ','
LINES TERMINATED BY ';';
truncate table r_job_test;
load data infile '/tmp/r_job_test.txt' into table myke.r_job_test;
MySQL中导出CSV格式数据的SQL语句:
select * from r_job_test into outfile '/tmp/r_job_test.csv' CHARACTER SET utf8
fields terminated by ',' optionally enclosed by '"' escaped by '"'
lines terminated by '\r\n';
MySQL中导入CSV格式数据的SQL语句:
load data infile '/tmp/r_job_test.csv' into table r_job_test
fields terminated by ',' optionally enclosed by '"' escaped by '"'
lines terminated by '\r\n';
select host,clock,value from ifin_07150650 into outfile '/tmp/ifin_07150650.csv' CHARACTER SET utf8
fields terminated by ',' optionally enclosed by '"' escaped by '"'
lines terminated by '\r\n';
show warnings;
里面最关键的部分就是格式参数
fields terminated by ',' optionally enclosed by '"' escaped by '"'
lines terminated by '\r\n'
这个参数是根据RFC4180文档设置的,该文档全称Common Format and MIME Type for Comma-Separated Values (CSV) Files,其中详细描述了CSV格式,其要点包括:
(1)字段之间以逗号分隔,数据行之间以\r\n分隔;
(2)字符串以半角双引号包围,字符串本身的双引号用两个双引号表示。
SELECT * FROM R_JOB INTO OUTFILE '/tmp/r_job_test.txt'
CHARACTER SET utf8
FIELDS
TERMINATED BY '\、'
OPTIONALLY ENCLOSED BY '\"'
LINES
STARTING BY '\>'
TERMINATED BY '\r\n';
SELECT * FROM mysql.user INTO OUTFILE '/tmp/mysql_user.txt'
CHARACTER SET utf8
FIELDS
TERMINATED BY '\、'
OPTIONALLY ENCLOSED BY '\"'
LINES
STARTING BY '\>'
TERMINATED BY '\r\n';
>"localhost"、"slow"、""、"Y"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、""、""、""、""、0、0、0、0、""、\N
>"%"、"slow"、"*94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29"、"Y"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、"N"、""、""、""、""、0、0、0、0、""、\N
>"%"、"adminch"、"*B605AE15C73F51DA63BFE3111E968C0A410E4F4D"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"N"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、""、""、""、""、0、0、0、0、""、\N
>"%"、"root"、"*6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、"Y"、""、""、""、""、0、0、0、0、""、\N
create table chuser as select * from mysql.user where 1=2;
load data infile '/tmp/mysql_user.txt' into table chuser
FIELDS
TERMINATED BY '\、'
OPTIONALLY ENCLOSED BY '\"'
LINES
STARTING BY '\>'
TERMINATED BY '\r\n';
(f1,f2,f3,f4);
[root@analytics27 copy]# more dump_copy_active.sh
#!/bin/bash
source /etc/profile
source /etc/bashrc
source ${HOME}/.bash_profile
set -e -u
dir=/data/copy
declare -a dbs=(active)
for db in ${dbs[@]}
do
mysqldump -u root --opt --default-character-set=utf8 ${db} -T ${dir}/${db} --fields-terminated-by=',' --lines-terminated-by='\n' --fields-optionally-enclosed-by='"' --fields-escaped-by='\\'
date '+%Y%m%d' >${dir}/${db}/copy.ok
done
./shop_koubei.sh
mysqldump -uroot -p123456 --opt --default-character-set=utf8 mysql -T /tmp/db/ --fields-terminated-by=',' \
--lines-terminated-by='\n' --fields-optionally-enclosed-by='"' --fields-escaped-by='\\'
--fields-optionally-enclosed-by
------------------------------------------------------
导出导入:OK
select * from mysql.user into outfile '/tmp/mysql_user2.txt' character set utf8 lines terminated by '\r\n' ;
load data infile '/tmp/mysql_user2.txt' into table chuser2;
select * from id into outfile '/tmp/id.txt' character set utf8 lines terminated by '\n' ;
load data infile '/tmp/id.txt' into table id;
[root@db1 db]# more /tmp/id.txt
1 chen
2 陈浩
select * into outfile '/tmp/id_1.txt' fields terminated by ',' optionally enclosed by '"' lines terminated by '\n' from myke.id;
load data infile '/tmp/id_1.txt' into table myke.id fields terminated by ',' optionally enclosed by '"' lines terminated by '\n';
[root@db1 tmp]# more id_1.txt
","chen
","陈浩
set names latin1;
select * into outfile '/tmp/id_2.txt' fields terminated by ',' lines terminated by '\n' from myke.id;
load data infile '/tmp/id_2.txt' into table myke.id fields terminated by ',' optionally enclosed by '"' lines terminated by '\n';
[root@db1 tmp]# more /tmp/id_2.txt
1,chen
2,陈浩
FIELDS TERMINATED BY ',' 字段间分割符
OPTIONALLY ENCLOSED BY '"' 将字段包围 对数值型无效
LINES TERMINATED BY '\n' 换行符
---------------------------
http://blog.csdn.net/gyanp/article/details/7109314
create table myid (id int,name1 char(10),name2 char(13));
insert into myid values (1,'chenchen01','chen01chen011');
insert into myid values (2,'chen02','chen02');
mysql> select * from myid;
+------+------------+---------------+
| id | name1 | name2 |
+------+------------+---------------+
| 1 | chenchen01 | chen01chen011 |
| 2 | chen02 | chen02 |
+------+------------+---------------+
MySQL 去除字段中的换行和回车符
解决方法:
UPDATE tablename SET field = REPLACE(REPLACE(field, CHAR(10), ''), CHAR(13), '');
char(10): 换行符
char(13): 回车符
直接生成csv格式文件
mysqldump -uroot -p123456 -T /tmp/db/ --fields-terminated-by="," --fields-enclosed-by="" \
--lines-terminated-by="\n" --fields-escaped-by="" myke id
------------------------------
ok:导出单个表csv,然后导入进去
mysqldump -uroot -p123456 -T /tmp/db/ --fields-terminated-by="," --fields-enclosed-by="" \
--lines-terminated-by="\n" --fields-escaped-by="" myke myid
[root@db1 db]# more myid.txt
1,chenchen01,chen01chen011
2,chen02,chen02
load data infile '/tmp/db/myid.txt' into table myid fields terminated by ',' optionally enclosed by "" lines terminated by '\n';
用Excel打开导出的txt文件,要注意用逗号做为分割符,这样可以将各个字段分开
load data infile '/tmp/mysql_user.txt' into table chuser
FIELDS
TERMINATED BY '\、'
OPTIONALLY ENCLOSED BY '\"'
LINES
STARTING BY '\>'
TERMINATED BY '\r\n';
(f1,f2,f3,f4);
----------------------------
导出单个数据表table:ok
mysqldump -uroot -p123456 myke id > /tmp/iddump.sql
会生成下面的SQL
INSERT INTO `id` VALUES (1,'chen\r'),(2,'陈浩\r');
vi /tmp/iddump.sql
-- DROP TABLE IF EXISTS `id`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- CREATE TABLE `id` (
-- `id` int(11) DEFAULT NULL,
-- `name` varchar(20) DEFAULT NULL
-- ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
将表的DDL语句注释掉
mysql -uroot -p123456 myke < /tmp/iddump.sql
mysql> select * from id;
+------+---------+
| id | name |
+------+---------+
| 1 | chen
| 2 | 陈浩
--------------
从服务器切换主服务器
应该用--log-bin选项而不用--log-slave-updates选项运行从服务器。这样,一旦你在其它从服务器上发出STOP SLAVE; RESET MASTER,
以及CHANGE MASTER TO语句,该从服务器可以切换为主服务器。例如,假设有下面的设置
auto_increment_offset确定AUTO_INCREMENT列值的起点
auto_increment_increment确定每次增加值
auto_increment_offset=5,则从5开始
auto_increment_increment=10,则每次加10
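A dual-master sketch of those two settings, so the servers never hand out colliding AUTO_INCREMENT values:
# master 1 my.cnf
auto_increment_increment = 2
auto_increment_offset = 1   # generates 1,3,5,...
# master 2 my.cnf
auto_increment_increment = 2
auto_increment_offset = 2   # generates 2,4,6,...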
--------------------
一次主从数据库切换的实战
前几天北京机房的奇数数据库突然不工作了,经过紧急调试后,发现是raid5中的一块硬盘坏掉了,导致系统
启动失败。启动紧急应对方案,停掉主库,启动从库,让从库工作。
首先是在从库上stop slave;
然后让所有的数据代理指向从库,让平台正常运营。
经过工程师维修后,主库恢复正常,但不是很信任这台机器了,准备让他做从库使用,于是就来了一场主从切换的
实战。
从库:
stop slave;
reset master;
reset slave;
show processlist;
show master status;
记住log以及pos。
主库:
reset master;
reset slave;
show processlist;
然后就是进行change master的操作,让主库变成从库,并且让他指向上面的从库(后来的主库)。
最后 start slave;
为了确保切换是否顺利,两台服务器上分别:
show processlist;
然后在从服务器上看看
show slave status;
看看是不是有crash的记录,我这次就遇到了,通过清理数据库,然后重新同步后解决。
注意的是,要保证所有的serverid 不同。
----------------
HeartBeat + DRBD以及MySQL replication是很多企业比较普遍使用的方式。对于数据的完整性和一致性的问题,
这两种架构需要考虑2个重要的参数innodb_flush_log_at_trx_commit以及sync_binlog参数。
本文主要参考了MySQL 5.6 Reference Manual列出对这2个参数的具体描述。
innodb_flush_log_at_trx_commit (这个很管用)
默认值1的意思是每一次事务提交或事务外的指令都需要把日志写入(flush)硬盘,这是很费时的
innodb_flush_log_at_trx_commit = 1,这也是Innodb 的默认设置。我们每次事务的结束都会触发Log Thread 将log buffer 中的数据写入文件
并通知文件系统同步文件。这个设置是最安全的设置,能够保证不论是MySQL Crash 还是OS Crash 或者是主机断电都不会丢失任何已经提交的数据。
innodb_flush_log_at_trx_commit = 2,当我们设置为2 的时候,Log Thread 会在我们每次事务结束的时候将数据写入事务日志,
但是这里的写入仅仅是调用了文件系统的文件写入操作。而我们的文件系统都是有缓存机制的,所以Log Thread 的这个写入并不能保证内容真的已经
写入到物理磁盘上面完成持久化的动作。
设成0会更快一点,但安全方面比较差,即使MySQL挂了也可能会丢失事务的数据。而值2只会在整个操作系统挂了时才可能丢数据。
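For the HA setups above, the usual fully durable pairing is sketched below; relaxing either value trades safety for speed, as described:
[mysqld]
innodb_flush_log_at_trx_commit = 1   # fsync redo log at every commit
sync_binlog = 1                      # fsync binary log at every commit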
------------
153
[root@sjhqweb153 ~]# ps -ef |grep mysql
root 19757 1 0 May19 ? 00:00:00 /bin/sh /usr/local/mysql/bin/mysqld_safe --datadir=/usr/local/mysql/data --pid-file=/usr/local/mysql/data/sjhqweb153.pid
mysql 20065 19757 0 May19 ? 00:59:24 /usr/local/mysql/bin/mysqld --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data --plugin-dir=/usr/local/mysql/lib/plugin --user=mysql --log-error=/usr/local/mysql/data/sjhqweb153.err --pid-file=/usr/local/mysql/data/sjhqweb153.pid
mysql.server restart
mysql> show status like '%qcache%';
+-------------------------+----------+
| Variable_name | Value |
+-------------------------+----------+
| Qcache_free_blocks | 1 |
| Qcache_free_memory | 67085000 |
| Qcache_hits | 1 |
| Qcache_inserts | 17 |
| Qcache_lowmem_prunes | 0 |
| Qcache_not_cached | 10 |
| Qcache_queries_in_cache | 4 |
| Qcache_total_blocks | 12 |
+-------------------------+----------+
Qcache_hits:表示有多少次命中缓存。我们主要可以通过该值来验证我们的查询缓存的效果。数字越大,缓存效果越理想。
Qcache_inserts: 表示多少次未命中然后插入,意思是新来的SQL请求在缓存中未找到,不得不执行查询处理
Qcache_lowmem_prunes:该参数记录有多少条查询因为内存不足而被移除出查询缓存。通过这个值,用户可以适当的调整缓存大小。
如果Qcache_free_blocks值过大,可能是query_cache_min_res_unit值过大,应该调小些
query_cache_min_res_unit 查询缓存分配的最小块的大小(字节)
查询缓存命中率的计算公式是:Qcache_hits/(Qcache_hits + Com_select)。
mysql> show status like '%Com_select%';
Qcache_hits | 2423
show status like '%Com_select%';
| Com_select | 7 |
命中率:select 2423/(2423+7);
select SQL_NO_CACHE count(*) from users where email = 'hello';
-----------------------
http://biancheng.dnbcw.info/mysql/253667.html
表连接顺序
在oracle中可以指定的表连接的hint有很多:ordered hint 指示oracle按照from关键字后的表顺序来进行连接;
leading hint 指示查询优化器使用指定的表作为连接的首表,即驱动表;
use_nl hint指示查询优化器使用nested loops方式连接指定表和其他行源,并且将强制指定表作为inner表。
在mysql中就有之对应的straight_join,由于mysql只支持nested loops的连接方式,所以这里的straight_join类似oracle中的use_nl hint。
mysql优化器在处理多表的关联的时候,很有可能会选择错误的驱动表进行关联,导致了关联次数的增加,从而使得sql语句执行变得非常的缓慢,
这个时候需要有经验的DBA进行判断,选择正确的驱动表,这个时候straight_join就起了作用了,下面我们来看一看使用straight_join进行优化。
MySQL STRAIGHT_JOIN 与 NATURAL JOIN
STRAIGHT_JOIN 是 MySQL 对标准 SQL 的扩展,用于在多表查询时指定表载入的顺序。在 JOIN 表连接中,同样可以指定表载入的顺序
MySQL STRAIGHT_JOIN 语法如下:
... FROM table1 STRAIGHT_JOIN table2 ON condition ...
STRAIGHT_JOIN 实际上与内连接 INNER JOIN 表现完全一致,不同的是使用了 STRAIGHT_JOIN 后,table1 会先于 table2 载入。
如果有更多表进行连接,那么使用 STRAIGHT_JOIN 后,其载入顺序就遵循从左往右的规则。最后,STRAIGHT_JOIN 无法应用于 LEFT JOIN 或 RIGHT JOIN。
set explain_level=verbose;
explain select count(*) from big join medium where big.id = medium.id;
查看索引:
show index from test_log;
explain SELECT DATE(practicetime) date_time,COUNT(DISTINCT a.userid) people_rows
FROM USER b straight_join test_log a
WHERE a.userid=b.userid AND b.isfree=0 AND LENGTH(b.username)>4
GROUP BY DATE(practicetime)\G;
指定驱动表:user
-----------------
MySql sql优化之order by desc/asc limit M
SELECT * FROM test_order_desc WHERE END_TIME>now() ORDER BY GMT_CREATE DESC,count_num DESC LIMIT 12, 12;
创建索引:
在order by 后面条件中加索引
ALTER TABLE test_order_desc ADD INDEX ind_gmt_create(gmt_create,count_num);
mysql强制索引和禁止某个索引
1、mysql强制使用索引:force index(索引名或者主键PRI)
select * from table force index(PRI) limit 2;(强制使用主键)
2、mysql禁止某个索引:ignore index(索引名或者主键PRI)
-----------------------------
MYSQL---基于开销的执行计划探讨
http://biancheng.dnbcw.info/mysql/253667.html
创建示例数据库:
http://ari.iteye.com/blog/1066690
https://launchpad.net/test-db/employees-db-1/1.0.6
tar -xjf $HOME/Downloads/employees_db-full-1.0.4.tar.bz2
//解压缩,进入目录
cd employees_db/
//导入数据库root为用户名
mysql -t -u root -p < employees.sql
创建MySQL示例数据库的过程(menagerie)
相关脚本:http://downloads.mysql.com/docs/menagerie-db.zip
见README
select * from pet;
select * from event;
scott示例数据库:
create database if not exists `scott`;
USE `scott`;
DROP TABLE IF EXISTS `dept`;
CREATE TABLE `dept` (
`DEPTNO` decimal(2,0) NOT NULL,
`DNAME` varchar(14) default NULL,
`LOC` varchar(13) default NULL,
PRIMARY KEY (`DEPTNO`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
insert into `dept`(`DEPTNO`,`DNAME`,`LOC`) values ('10','ACCOUNTING','NEW YORK'),('20','RESEARCH','DALLAS'),('30','SALES','CHICAGO'),('40','OPERATIONS','BOSTON');
DROP TABLE IF EXISTS `emp`;
CREATE TABLE `emp` (
`EMPNO` decimal(4,0) NOT NULL,
`ENAME` varchar(10) default NULL,
`JOB` varchar(9) default NULL,
`MGR` decimal(4,0) default NULL,
`HIREDATE` datetime default NULL,
`SAL` decimal(7,2) default NULL,
`COMM` decimal(7,2) default NULL,
`DEPTNO` decimal(2,0) default NULL,
PRIMARY KEY (`EMPNO`),
KEY `FK_DEPTNO` (`DEPTNO`),
CONSTRAINT `FK_DEPTNO` FOREIGN KEY (`DEPTNO`) REFERENCES `dept` (`DEPTNO`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
insert into `emp`(`EMPNO`,`ENAME`,`JOB`,`MGR`,`HIREDATE`,`SAL`,`COMM`,`DEPTNO`) values ('7369','SMITH','CLERK','7902','0000-00-00 00:00:00','800.00',NULL,'20'),('7499','ALLEN','SALESMAN','7698','0000-00-00 00:00:00','1600.00','300.00','30'),('7521','WARD','SALESMAN','7698','0000-00-00 00:00:00','1250.00','500.00','30'),('7566','JONES','MANAGER','7839','0000-00-00 00:00:00','2975.00',NULL,'20'),('7654','MARTIN','SALESMAN','7698','0000-00-00 00:00:00','1250.00','1400.00','30'),('7698','BLAKE','MANAGER','7839','0000-00-00 00:00:00','2850.00',NULL,'30'),('7782','CLARK','MANAGER','7839','0000-00-00 00:00:00','2450.00',NULL,'10'),('7788','SCOTT','ANALYST','7566','0000-00-00 00:00:00','3000.00',NULL,'20'),('7839','KING','PRESIDENT',NULL,'0000-00-00 00:00:00','5000.00',NULL,'10'),('7844','TURNER','SALESMAN','7698','0000-00-00 00:00:00','1500.00','0.00','30'),('7876','ADAMS','CLERK','7788','0000-00-00 00:00:00','1100.00',NULL,'20'),('7900','JAMES','CLERK','7698','0000-00-00 00:00:00','950.00',NULL,'30'),('7902','FORD','ANALYST','7566','0000-00-00 00:00:00','3000.00',NULL,'20'),('7934','MILLER','CLERK','7782','0000-00-00 00:00:00','1300.00',NULL,'10');
select count(*) from emp;
14
select count(*) from dept;
4
今天在和网友Mablevi探讨连接查询使用索引的情况,在改变连接顺序后,用EXPLAIN查看执行计划,都是小表在前面,大表在后面。我们知道MYSQL执行连接查询的时候,使用的叫“内循环”的技术,即依次取出外层循环的表(驱动表)的记录,然后到内层循环表(被驱动表)去查找符合条件的记录。驱动表是依次循环,全表扫描的,被驱动表可以通过索引查找,提高效率。
同时我们知道,MYSQL的执行计划是会去计算开销的,一个SQL查询可能会产生好几百种执行计划,根据开销最小原则,EXPLAIN出来的,就是经过挑选后产生的最优开销。下面我们来实际操作验证一下MYSQL的执行技术是否真的够“聪明”。
show index from emp;
show index from dept;
关联查询
select sql_no_cache count(*) from emp inner join dept on emp.DEPTNO=dept.DEPTNO;
explain select sql_no_cache count(*) from emp inner join dept on emp.DEPTNO=dept.DEPTNO;
+----+-------------+-------+-------+---------------+-----------+---------+-------------------+------+--------------------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+-----------+---------+-------------------+------+--------------------------+
| 1 | SIMPLE | dept | index | PRIMARY | PRIMARY | 1 | NULL | 4 | Using index |
| 1 | SIMPLE | emp | ref | FK_DEPTNO | FK_DEPTNO | 2 | scott.dept.DEPTNO | 1 | Using where; Using index |
+----+-------------+-------+-------+---------------+-----------+---------+-------------------+------+--------------------------+
正常情况下,是小表驱动大表:dept表(4条),emp表(14条),看执行计划,先扫描dept表,然后再通过索引扫描emp表。
最后执行的SQL开销:
mysql> show status like 'Last_query_cost';
+-----------------+----------+
| Variable_name | Value |
+-----------------+----------+
| Last_query_cost | 5.799000 |
+-----------------+----------+
强制指定表连接顺序,看看开销
select sql_no_cache count(*) from emp straight_join dept on emp.DEPTNO=dept.DEPTNO;
mysql> show status like 'Last_query_cost';
+-----------------+-----------+
| Variable_name | Value |
+-----------------+-----------+
| Last_query_cost | 17.799000 |
+-----------------+-----------+
执行成本变成17,是原来的3倍,效率不好。
执行计划解释:
ref 列:指出对 key 列所选择的索引的查找方式,常见的值有 const, func, NULL, 具体字段名。当 key 列为 NULL ,即不使用索引时,此值也相应的为 NULL 。
当 key 列为 NULL , ref 列也相应为 NULL
rows列:此列表示 MySQL 估计查询需要读取的行数
Extra列:
Using index:此查询使用了覆盖索引
Using where:表示 MySQL 服务器从存储引擎收到行后再进行“后过滤”(Post-filter)。所谓“后过滤”,就是先读取整行数据,再检查此行是否符合 where 句的条件,
符合就留下,不符合便丢弃。因为检查是在读取行后才进行的,所以称为“后过滤”。
type 为 range、ref 或者 index 的时候才有可能利用索引排序,其它,如 ALL ,都无法通过索引排序,此时若有 order by ,
如上例,便会出现 Using filesort 。
Using filesort:对于没有索引的表,只要 order by 必会出现 Using filesort 。
alter table article drop index ind_article_url;
alter table article add index ind_article_url(url);
select count(distinct left(url,76))/count(*) url_76,
-> count(distinct left(url,77))/count(*) url_77,
-> count(distinct left(url,78))/count(*) url_78,
-> count(distinct left(url,79))/count(*) url_79,
-> count(distinct left(url,80))/count(*) url_80,
-> count(distinct left(url,81))/count(*) url_81,
-> count(distinct left(url,82))/count(*) url_82,
-> count(distinct left(url,83))/count(*) url_83,
-> count(distinct left(url,84))/count(*) url_84,
-> count(distinct left(url,85))/count(*) url_85
-> from article;
alter table article add index ind_article_url(url(85));
ALTER TABLE people ADD INDEX fname_lname_age (firstname,lastname,age);
EXPLAIN SELECT peopleid FROM people WHERE firstname='Mike' AND lastname='Sullivan' AND age='17';
key_len:
索引中被使用部分的长度,以字节计。在本例中,key_len是102,其中firstname占50字节,lastname占50字节,age占2字节。如果MySQL只使用索引中的firstname部分,则key_len将是50。
在运行DDL之前,查看事务是否频繁,在运行DDL之后,开启另外一个会话,使用show processlist查看是否被metadata lock阻塞.
一旦阻塞,先Kill ddl的操作
找出重复的索引:
pt-duplicate-key-checker
功能为从mysql表中找出重复的索引和外键,这个工具会将重复的索引和外键都列出来,并生成了删除重复索引的语句
查看test数据库的重复索引和外键使用情况使用如下命令
pt-duplicate-key-checker --host=localhost --user=root --password=123456 --socket=/tmp/mysql.sock --databases=test
alter table t1 add index ind_name (name(10));
alter table t1 add index ind_name2 (name);
show index from t1;
----------------------------------
MySQL性能分析及explain的使用
http://blog.csdn.net/gevolution90/article/details/6856922
mysql索引使用率监控
show status like 'Handler_read%';
Handler_read_first 代表读取索引头的次数,如果这个值很高,说明全索引扫描很多。
Handler_read_key代表一个索引被使用的次数,如果我们新增加一个索引,可以查看Handler_read_key是否有增加,如果有增加,说明sql用到索引。
Handler_read_next 代表按索引顺序读取下一行的次数,一般发生在range scan。
Handler_read_prev 代表按索引逆序读取上一行的次数,一般发生在ORDER BY … DESC。
Handler_read_rnd 代表在固定位置读取行,如果这个值很高,说明对大量结果集进行了排序、进行了全表扫描、关联查询没有用到合适的KEY。
Handler_read_rnd_next 代表进行了很多表扫描,查询性能低下。
----------------------------------
show warnings
--------------
数据类型:enum
enum有优点。但个人觉得。。。缺点更多,客观的讲:优点主要是在建数据 库的时候就可以把一些值给规范好。缺点是。。enum不适合PHP。
主要是PHP是弱类型,如:你insert into ..... set a= 1,你没法知道你是想 a= '1' 还是 a= 1(a='1'是插入值1,a=1是插入enum的第一个值,
尤其php弱类型的,如果int的,很少有人在sql里加双引号。),这是PHP和mysql 在使用enum 最大的问题。所以。。安心点啦。干脆点直接tinyint。
单曲观点:
我觉得没什么优点,对数字型的enum,简直就是梦魇,boolean tinyint(1) 0,1 status tinyint(1) 1,2,3,4,5,6..tinyint欢淫你~~。
如:audit_result enum(1,2,3),set audit_result = 1;...容易出现膘哥所说的混淆。
简单观点:
少用,一般都是用tinyint替代。
----------------
好的优化建议:
5. 在Join表的时候使用类型相同的列,并为其建立索引
如果你的应用程序有很多 JOIN 查询,你应该确认两个表中Join的字段是被建过索引的。这样,MySQL内部会启动为你优化Join的SQL语句的机制。
from users left join comm on (users.state=comm.state)
这2个state字段,应该被建立索引,而且是相同的类型,相同的字符集
而且,这些被用来Join的字段,应该是相同的类型的。例如:如果你要把 DECIMAL 字段和一个 INT 字段Join在一起,
MySQL就无法使用它们的索引。对于那些STRING类型,还需要有相同的字符集才行。(两个表的字符集有可能不一样)
10. 从 PROCEDURE ANALYSE() 取得建议
PROCEDURE ANALYSE() 会让 MySQL 帮你去分析你的字段和其实际的数据,并会给你一些有用的建议。只有表中有实际的数据,这些建议才会变得有用,因为要做一些大的决定是需要有数据作为基础的。
11. 尽可能的使用 NOT NULL
-------------
mysql 将时间戳直接转换成日期时间
SELECT FROM_UNIXTIME( 1249488000, '%Y%m%d' )
-----------------
CBO:
show table status like 'tablename';
如何收集统计信息
analyze table
show index from id;
Cardinality: 2 //要与实际的行数一致,统计信息才真实
analyze table id;
show index from id;
对于INNODB
1.表第一次打开时
2.表修改的行超过1/16或者20亿条
3.执行Show index/table或者查询information_schema下的tables/statistics表时
统计信息相关参数
innodb_stats_on_metadata参数用来控制此行为,设置为false时不自动更新统计信息
innodb_stats_sample_pages每次收集统计信息时采集的页数,默认为8,建议适当加大这个值,使统计信息更准确
查看执行计划
explain
------------------
表碎片优化
optimize table id; //不支持innodb引擎的表,只支持myisam表
mysql> optimize table id;
+---------+----------+----------+-------------------------------------------------------------------+
| Table | Op | Msg_type | Msg_text |
+---------+----------+----------+-------------------------------------------------------------------+
| myke.id | optimize | note | Table does not support optimize, doing recreate + analyze instead |
| myke.id | optimize | status | OK |
+---------+----------+----------+-------------------------------------------------------------------+
Optimize 正常都會出現 status ok (MyISAM), 但是 InnoDB 的都會出現此訊息: Table does not support optimize, doing recreate + analyze instead.
注意:在OPTIMIZE TABLE运行过程中,MySQL会锁定表。
所以對 InnoDB 的話, 就使用下述語法: (請自行將 table.name 改成自己的 Table 名稱)
ALTER TABLE table.name ENGINE='InnoDB';
alter table id engine='innodb';
這語法的作用會是: 建立一個新的 Table, 由舊的 Table 將資料拷貝進來, 然後再把舊的 Table 砍掉, 但是, 作者建議先備份後再來執行此動作比較好.
注意
-----------
MySQL InnoDB Replication Slave 太慢跟不上解法
vim /etc/mysql/conf.d/replication_slow.conf
[mysqld]
innodb_flush_log_at_trx_commit = 0
innodb_buffer_pool_size = 512M
sudo service mysql restart # 到此即可, 再來就會看到 Slave 會慢慢跟上進度囉~
---------------------------
共享表空间转独立表空间
show variables like '%per_table%';
innodb-file-per-table=1 //独立表空间
模拟共享表空间数据
create database sharedb default character set utf8;
use sharedb;
create table share1(id int,name varchar(20));
insert into share1 values (1,'chen');
查看
root@db3 data]# cd sharedb/
[root@db3 sharedb]# ll
-rw-rw----. 1 mysql mysql 61 May 29 11:26 db.opt
-rw-rw----. 1 mysql mysql 8586 May 29 11:26 share1.frm
mysqldump -uroot -p123456 sharedb share1>/tmp/sharedb.sql
将共享表空间更改为独立表空间,有2种方法:
1.先逻辑备份,然后修改my.conf中的 innodb_file_per_table参数=1,再将逻辑备份导入
2.只要修改innodb_file_per_table参数,然后将需要修改的所有innodb的表都运行一遍 alter table table_name engine=innodb;即可
测试如下:
mysql test <insert_CID.sql
mysql> select count(*) from app_device_info;
+----------+
| count(*) |
+----------+
| 592929 |
共享表空间:
只有定义文件,没有数据文件
[root@db3 test]# ll -h
total 12K
-rw-rw----. 1 mysql mysql 9.3K May 29 11:45 app_device_info.frm
将数据导出
mysqldump test >/tmp/test.sql
清掉数据,释放在共享表空间的空间
truncate table app_device_info;
修改为独立表空间参数
innodb-file-per-table=1
恢复数据
mysql test </tmp/test.sql
己改成独立表空间
[root@db3 test]# ll -h
total 97M
-rw-rw----. 1 mysql mysql 9.3K May 29 11:56 app_device_info.frm
-rw-rw----. 1 mysql mysql 96M May 29 11:56 app_device_info.ibd
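For method 2, a helper that generates the per-table ALTER statements; the schema filter is an assumption, adjust it to your databases:
mysql -N -uroot -p -e "SELECT CONCAT('ALTER TABLE ', table_schema, '.', table_name, ' ENGINE=InnoDB;') FROM information_schema.tables WHERE engine='InnoDB' AND table_schema NOT IN ('mysql','information_schema','performance_schema');" > /tmp/convert_per_table.sql
mysql -uroot -p < /tmp/convert_per_table.sql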
----------------------
误删除innodb共享表空间:ok
[root@db3 ~]# sh indata.sh 100000
rm -f ibdata1 ib_logfile0 ib_logfile1
现在数据库还可以正常访问使用
ps -ef |grep mysqld
ls -la /proc/30118/fd/ | grep -e ibdata -e ib_
lrwx------. 1 root root 64 May 29 15:50 3 -> /usr/local/mysql/data/ibdata1 (deleted)
lrwx------. 1 root root 64 May 29 15:50 8 -> /usr/local/mysql/data/ib_logfile0 (deleted)
lrwx------. 1 root root 64 May 29 15:50 9 -> /usr/local/mysql/data/ib_logfile1 (deleted)
FLUSH TABLES WITH READ LOCK;
SHOW engine innodb STATUS\G;
LOG
---
Log sequence number 243347310
Log flushed up to 243347310
Last checkpoint at 243347310
checkpoint age is the Log sequence number value minus the Last checkpoint at value;
if it is 0, every page has been flushed to the files on disk.
select 243347310-243347310;
0
SET global innodb_max_dirty_pages_pct=0;
Also make sure InnoDB is no longer performing any writes:
Pending normal aio reads: 0 [0, 0, 0, 0] , aio writes: 0 [0, 0, 0, 0] ,
ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0
Pending flushes (fsync) log: 0; buffer pool: 0
158 OS file reads, 28505 OS file writes, 1360 OS fsyncs
0.00 reads/s, 0 avg bytes/read, 0.00 writes/s, 0.00 fsyncs/s
Then copy the files back:
cp /proc/30118/fd/3 /usr/local/mysql/data/ibdata1
cp /proc/30118/fd/8 /usr/local/mysql/data/ib_logfile0
cp /proc/30118/fd/9 /usr/local/mysql/data/ib_logfile1
chown mysql:mysql *
unlock tables
service mysqld restart
#########################################
mysql -uroot -p123456 --execute="SHOW engine innodb STATUS\G;" > /tmp/t.txt;
---------------
7.2.8. How MySQL optimizes DISTINCT
In many cases DISTINCT combined with ORDER BY needs a temporary table.
Note that because DISTINCT may use GROUP BY, you must understand how MySQL handles ORDER BY or HAVING columns that are not part of the selected columns. See Section 12.10.3, "GROUP BY with hidden columns".
In most cases a DISTINCT clause can be treated as a special case of GROUP BY. For example, the following two queries are equivalent:
SELECT DISTINCT c1, c2, c3 FROM t1 WHERE c1 > const;
SELECT c1, c2, c3 FROM t1 WHERE c1 > const GROUP BY c1, c2, c3;
When LIMIT row_count is combined with DISTINCT, MySQL stops as soon as it has found row_count unique rows.
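A minimal illustration of the early-stop behavior, reusing t1 from above:
SELECT DISTINCT c1 FROM t1 LIMIT 10;  -- scanning stops once 10 distinct c1 values are found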
7.2.10. How MySQL optimizes nested joins
The join syntax allows nested joins. The discussion below refers to the join syntax described in Section 13.2.7.1, "JOIN Syntax".
The table_factor syntax is extended compared with the SQL standard, which accepts only table_reference, not a list of them inside a pair of parentheses.
Each comma in a list of table_reference items is equivalent to an inner join; this is a conservative extension. For example:
SELECT * FROM t1 LEFT JOIN (t2, t3, t4)
ON (t2.a=t1.a AND t3.b=t1.b AND t4.c=t1.c)
is equivalent to:
SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4)
ON (t2.a=t1.a AND t3.b=t1.b AND t4.c=t1.c)
----------------------
For a busy server, we recommend a strategy using three key caches (MyISAM):
· A "hot" key cache taking up 20% of the space allocated to all key caches. Use it for tables that are heavily searched but not updated.
· A "cold" key cache taking up 20% of the space. Use it for medium-sized, intensively modified tables, such as temporary tables.
· A "warm" key cache taking up the remaining 60% of the key cache space. Use it as the default key cache, used by all other tables by default.
key_buffer_size = 4G
hot_cache.key_buffer_size = 2G
cold_cache.key_buffer_size = 2G
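To actually bind tables to the named caches, use CACHE INDEX (and optionally LOAD INDEX); a sketch with placeholder table names:
CACHE INDEX hot_tbl IN hot_cache;   -- heavily-searched, rarely-updated table
CACHE INDEX tmp_tbl IN cold_cache;  -- heavily-modified table
LOAD INDEX INTO CACHE hot_tbl;      -- optionally preload its index blocks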
-------------------
On Unix, the mysql client writes a record of executed statements to a history file. By default the history file is named .mysql_history and is created in your home directory. To use a different file, set the MYSQL_HISTFILE environment variable.
If you do not want a history file, first remove .mysql_history if it exists, then use either of these methods:
· Set MYSQL_HISTFILE to /dev/null. To have the setting take effect at every login, put it in one of your shell's startup files.
· Create .mysql_history as a symbolic link to /dev/null:
· shell> ln -s /dev/null $HOME/.mysql_history
This only needs to be done once.
------------------
1. Installing mysqlsla
wget http://hackmysql.com/scripts/mysqlsla-2.03.tar.gz
tar zvxf mysqlsla-2.03.tar.gz
cd mysqlsla-2.03
perl Makefile.PL
make
make install
4. Usage examples
1. From the slow log dowload_server1-slow.log, take all slow SELECT statements, show the 10 with the longest execution time, and write them to test_time.log
mysqlsla -lt slow -sf "+select" -top 10 dowload_server1-slow.log >test_time.log
2. From the slow log dowload_server1-slow.log, for the database ultraxsmutf8, take all slow SELECT and UPDATE statements, show the 100 executed most often, and write them to num_time.log
mysqlsla -lt slow -sf "+select,update" -top 100 -sort c_sum -db ultraxsmutf8 dowload_server1-slow.log >num_time.log
-------------------------
The database on host 114 would not start:
InnoDB: Last MySQL binlog file position 0 1020490096, file name /usr/local/mysql/logs/newsinfodb1.001474
150603 3:05:38 InnoDB: Started; log sequence number 2 2766228723
/usr/local/mysql/libexec/mysqld: File 'ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso]' not found (Errcode: 2)
150603 3:05:38 [ERROR] Failed to open log (file 'ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso]', errno 2)
150603 3:05:38 [ERROR] Could not open log file
150603 3:05:38 [ERROR] Can't init tc log
150603 3:05:38 [ERROR] Aborting
./mysqld_safe --defaults-file=/etc/my.cnf &
Fix:
1. rm all of the mysql-bin.0000* log files
cd /usr/local/mysql/logs/
2. Edit my.cnf, find log-bin=mysql-bin, and rename the log-bin target
log-bin = /usr/local/mysql/logs/newsinfodb3
expire_logs_days=7
--------------------
Waiting for table metadata lock
This is because metadata locks were added in MySQL 5.5.3. The lock blocks DDL statements while a transaction is uncommitted; confusingly, even a bare SELECT blocks DDL as long as its transaction has not been committed.
One uncommitted transaction has used table A; another session then runs ALTER on table A and shows waiting for table metadata lock.
Conclusion:
Before running alter table tbl, check first whether any running SQL that cannot finish quickly is operating on the tbl table.
In 5.1, with no metadata lock, concurrent DDL can interfere throughout the whole backup.
In 5.5, with metadata locks, only DDL against tables already backed up is guaranteed to block, which merely lowers the probability that concurrent DDL causes trouble.
The full solution would be to acquire metadata locks on every table in the instance between start transaction and unlock tables, but when there are many tables this can take a very long time,
and since that window sits under FLUSH TABLES WITH READ LOCK, DML would be blocked; presumably because DML runs far more often than DDL, mysqldump chose maximum DML concurrency.
We know an InnoDB row-lock wait errors out after innodb_lock_wait_timeout. So why does the metadata lock here wait forever without timing out? Because the metadata lock timeout is governed by a different system variable: lock_wait_timeout.
lock_wait_timeout governs every operation that takes metadata locks: DML and DDL, operations on tables, views, stored procedures and stored functions, plus LOCK TABLES, FLUSH TABLES WITH READ LOCK, HANDLER, and so on.
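A sketch of making a DDL fail fast instead of queueing behind a metadata lock (the default lock_wait_timeout is 31536000 seconds, i.e. one year; names below are placeholders):
SET SESSION lock_wait_timeout = 60;           -- give up after 60s
ALTER TABLE tbl ADD COLUMN new_col INT NULL;  -- errors out instead of hanging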
---------------------
--------------------
Copying to tmp table
109:
USE orders;
SELECT
a.substation '分站',
LEFT(b.pay_time,7) '月份',
COUNT(IF(a.has_preOrder=1,1,NULL)) '订金单支付笔数',
ROUND(SUM(b.dingjin)/100,2) '订金单支付订金金额',
ROUND(SUM(IF(a.has_preOrder=1 AND b.pay_count > 2,b.pre_order_fee,0))/100,2) '订金单支付尾款金额',
COUNT(IF(a.has_preOrder=0,1,NULL)) '全款单支付笔数',
ROUND(SUM(IF(b.pay_count=1 AND a.has_preOrder=0,b.pre_order_fee,0))/100,2) '全款单首次支付金额',
ROUND(SUM(IF(b.pay_count>1 AND a.has_preOrder=0,b.pre_order_fee,0))/100,2) '全款单尾款支付金额'
FROM orderGroup a LEFT JOIN new_preOrder b ON a.id = b.orderGroup_id
WHERE a.attribute = 3 AND b.`status` = 1 AND b.pay_time > '2015-04-01 00:00:00' GROUP BY LEFT(b.pay_time,7),a.substation;
-----------------
dstat --top-bio --top-io --top-cpu --top-mem
dstat -lasmt
dstat --top-bio --top-io --top-cpu --top-mem --disk-tps --disk-util
dstat --top-bio --top-io --top-cpu --top-mem --disk-tps --disk-util --top-oom
dstat -cdlmnpsy
----------------------
Finding the most frequently modified tables
mysqlbinlog --start-datetime='2015-06-08 16:00:00' --stop-datetime='2015-06-08 16:30:00' mysql-bin.000016 |grep -i -e "^update" -e "^insert" -e "^delete" -e "^replace" -e "^alter" |
cut -c1-100 | tr '[A-Z]' '[a-z]' |
sed -e "s/\t/ /g;s/\`//g;s/(.*$//;s/ set .*$//;s/ as .*$//" | sed -e "s/ where .*$//" |
sort | uniq -c | sort -nr
mysqlbinlog --start-datetime='2015-06-17 02:30:00' --stop-datetime='2015-06-17 02:40:00' mysql-bin-bak.000022 |grep -i "UPDATE new_returnOrder"
select * from new_returnOrder WHERE processStatus_id = 1 AND user_id = 101627550 AND order_id = 2106056;
UPDATE new_returnOrder SET processStatus_id = 7 WHERE processStatus_id = 1 AND user_id = 101627550 AND order_id = 2106056;
------------------
MySQL: convert a Unix timestamp directly to a date
SELECT FROM_UNIXTIME( 1331057439, '%Y-%m-%d' )
2012-03-07
Convert a date to a number:
SELECT UNIX_TIMESTAMP('2012-03-07') ;
1331049600
----------------
MySQL 5.5 partition performance test: index usage
1. Create a test table
CREATE TABLE test (
id VARCHAR(20) NOT NULL,
name VARCHAR(20) NOT NULL,
submit_time DATETIME NOT NULL,
index time_index (submit_time),
index id_index (id)
)ENGINE=innodb
PARTITION BY RANGE COLUMNS(submit_time)
(
PARTITION p1 VALUES LESS THAN ('2010-02-01'),
PARTITION p2 VALUES LESS THAN ('2010-03-01'),
PARTITION p3 VALUES LESS THAN ('2010-04-01'),
PARTITION p4 VALUES LESS THAN ('2010-05-01'),
PARTITION p5 VALUES LESS THAN ('2010-06-01'),
PARTITION p6 VALUES LESS THAN ('2010-07-01'),
PARTITION p7 VALUES LESS THAN ('2010-08-01'),
PARTITION p8 VALUES LESS THAN ('2010-09-01'),
PARTITION p9 VALUES LESS THAN ('2010-10-01'),
PARTITION p10 VALUES LESS THAN ('2010-11-01'),
PARTITION p11 VALUES LESS THAN ('2010-12-01')
);
2. Write a stored procedure to insert data
delimiter //
CREATE PROCEDURE mark_test()
begin
declare v int default 0;
while v < 8000
do
insert into test values (v,'testing partitions',adddate('2010-01-01', INTERVAL v hour));
set v = v + 1;
end while;
end //
delimiter ;
call mark_test();
With the MyISAM engine the inserts are fast; with InnoDB they are much slower, and the cost is in writing the log
3. Run the experiment
explain partitions select * from test where submit_time<'2010-04-03 09:00' and submit_time >'2010-04-03 01:00';
+----+-------------+-------+------------+-------+---------------+------------+---------+------+------+-------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------------+-------+---------------+------------+---------+------+------+-------------+
| 1 | SIMPLE | test | p4 | range | time_index | time_index | 8 | NULL | 7 | Using where |
+----+-------------+-------+------------+-------+---------------+------------+---------+------+------+-------------+
As shown above, this query hits a range within a single partition, so the index can be used in full. Works well.
explain partitions select * from test where submit_time<'2010-04-03 09:00' and submit_time >'2010-03-03 01:00';
+----+-------------+-------+------------+------+---------------+------+---------+------+------+-------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------------+------+---------------+------+---------+------+------+-------------+
| 1 | SIMPLE | test | p3,p4 | ALL | time_index | NULL | NULL | NULL | 1464 | Using where |
+----+-------------+-------+------------+------+---------------+------+---------+------+------+-------------+
As shown above, on a cross-partition query, if any partition cannot use the index (p4 here becomes a full scan), the whole query uses no index. The consolation is that only the needed partitions are scanned.
explain partitions select * from test where id>'1' and id <'100';
+----+-------------+-------+------------------------------------+-------+---------------+----------+---------+------+------+-------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------------------------------------+-------+---------------+----------+---------+------+------+-------------+
| 1 | SIMPLE | test | p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11 | range | id_index | id_index | 62 | NULL | 13 | Using where |
+----+-------------+-------+------------------------------------+-------+---------------+----------+---------+------+------+-------------+
As shown above, querying on a column other than the partitioning key is painful: MySQL does not know which partition each index entry lives in, so it has to scan the index in every partition.
SET profiling = 1;
select * from test where id>'1' and id <'100';
show profiles;
show profile for query 1;
Sending data means reading the SELECT result from disk and returning it to the client, which involves heavy IO. You can inspect it with show profile cpu for query XX;
we found MyISAM's CPU_system to be much higher than InnoDB's, so the conclusion is that MyISAM table lookups (as opposed to queries satisfied purely from an index) are slower than InnoDB.
http://dev.mysql.com/doc/refman/5.0/en/show-profile.html
show plugins;
SELECT
PLUGIN_NAME as Name,
PLUGIN_VERSION as Version,
PLUGIN_STATUS as Status
FROM INFORMATION_SCHEMA.PLUGINS
WHERE PLUGIN_TYPE='STORAGE ENGINE';
------------------------
Tuning tools:
Official site: MySQLTuner
wget --no-check-certificate http://mysqltuner.pl/mysqltuner.pl
wget https://raw.github.com/major/MySQLTuner-perl/master/mysqltuner.pl
http://github.com/rackerhacker/MySQLTuner-perl
http://mysqltuner.com/
mysqltuner.pl: a MySQL performance inspection and tuning script
chmod +x mysqltuner.pl
./mysqltuner.pl
or:
perl mysqltuner.pl --socket=/tmp/mysql.sock
innotop: a MySQL monitoring and administration tool
http://code.google.com/p/innotop/
wget http://innotop.googlecode.com/files/innotop-1.9.1-1.el6.noarch.rpm
# tar -zxvf innotop-1.9.0.tar.gz
# cd innotop-1.9.0
Installing innotop is very simple; it is written in Perl, so it needs a Perl environment and the related module packages. Before installing, make sure the system has these four modules: Time::HiRes, Term::ReadKey, DBI, DBD::mysql
# perl -MCPAN -eshell
CPAN> install Time::HiRes
CPAN> install Term::ReadKey
CPAN> install DBI
CPAN> install DBD::mysql
The unpacked source directory contains an install help file; in short:
# perl Makefile.PL
If that reports no errors, run:
# make install
That installs innotop; the system now has one more command, innotop
Get help:
# innotop --help
Usage:
innotop -uroot -p123456 -h127.0.0.1 -P3306
innotop --config=/usr/local/mysql/my.cnf
ioprofile: a sharp tool for inspecting IO
Project page: http://code.google.com/p/aspersa/
ioprofile docs: http://aspersa.googlecode.com/svn/html/ioprofile.html
pt-ioprofile
Usage: see man pt-ioprofile.
pt-ioprofile works by attaching a strace process to a pid and analyzing its IO. Find the mysqld pid with ps aux|grep mysqld,
then use pt-ioprofile to see which file's IO takes the most time. For pinning down problems it is often more useful to go by IO throughput: with the --cell=sizes parameter, the results are presented as byte counts.
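A sketch of the throughput-oriented invocation (flags as documented in the man page; 30s is the default run time):
pt-ioprofile --profile-process=mysqld --cell=sizes --run-time=30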
Tools needed:
iotop: http://guichaz.free.fr/iotop/
pt-ioprofile:http://www.percona.com/downloads/percona-toolkit/2.2.1/
wget percona.com/get/percona-toolkit.tar.gz
wget http://guichaz.free.fr/iotop/files/iotop-0.6-1.noarch.rpm
Step 1: iostat to look at the overall IO picture
Step 2: iotop to locate the process producing the load
Step 3: pt-ioprofile to locate the file producing the load
pt-ioprofile --profile-process=mysqld --save-samples pro.log
tuning-primer.sh
wget http://www.day32.com/MySQL/tuning-primer.sh
---------------------
A very slow query:
USE orders;
SELECT COUNT(1) '订单数',COUNT(DISTINCT user_id) '用户数',substation '分站' FROM orderGroup
WHERE attribute IN (3,4) AND substation IN ('shanghai','suzhou','wuxi','nanjing','chengdu','chongqing','hangzhou','kunming')
AND add_time > '2014-01-01 00:00:00' AND add_time < '2015-01-01 00:00:00' GROUP BY substation;
22s
rows:1291858
state            duration (summed) in sec   percentage
Sorting result   13.53874                   69.41125
Sending data     5.96352                    30.57414
Sorting result: the thread is sorting the result set
Sending data: covers several situations: the thread may be moving data between states, producing the result set, or returning data to the client
Copying to tmp table: the thread is executing the query and copying the result set into a temporary table, usually because of a GROUP BY, a filesort, or a UNION
USE orders;
SELECT COUNT(1) '订单数',COUNT(DISTINCT user_id) '用户数',substation '分站' FROM orderGroup FORCE INDEX(group_index_substation)
WHERE attribute IN (3,4) AND substation IN ('shanghai','suzhou','wuxi','nanjing','chengdu','chongqing','hangzhou','kunming')
AND add_time > '2014-01-01 00:00:00' AND add_time < '2015-01-01 00:00:00' GROUP BY substation;
8.8s
This is where Using temporary / Using filesort would appear.
Forcing the index removes the Sorting result state; the filesort is gone
rows:2597610
USE orders;
SELECT COUNT(1) '订单数',COUNT(DISTINCT user_id) '用户数',substation '分站' FROM orderGroup FORCE INDEX(add_time_index,group_index_substation)
WHERE attribute IN (3,4) AND substation IN ('shanghai','suzhou','wuxi','nanjing','chengdu','chongqing','hangzhou','kunming')
AND add_time > '2014-01-01 00:00:00' AND add_time < '2015-01-01 00:00:00' GROUP BY substation;
8s:
2597610
For sorting, MySQL uses two variables: sort_buffer_size and max_length_for_sort_data.
Increasing these two can speed up ORDER BY and GROUP BY operations.
I also increased read_buffer_size = X M
read_rnd_buffer_size = X M
Having too much content to sort also hurt the speed
For MySQL, the three simplest measures of query cost are:
response time, rows examined, rows returned
In general MySQL can apply a WHERE clause in three ways, from best to worst:
1. Filter out non-matching rows inside the index. This is done in the storage engine.
2. Use a covering index scan (Using index in the Extra column) to return rows, filtering in the index itself with no need to go back to the table. This is done at the MySQL server layer, but requires no table lookups.
3. Return rows from the table, then filter those that do not match (Using where in the Extra column). This is done at the server layer; MySQL has to read rows from the table before filtering.
If a query scans a lot of data but returns only a few rows, the usual tricks to optimize it are:
1. Use a covering index scan: put all the needed columns into the index so the storage engine can return results without fetching full rows
2. Change the schema, e.g. maintain a separate summary table
3. Rewrite the complex query so the MySQL optimizer can execute it in a better way
Ways to restructure a query
1. One complex query vs. several simple queries
2. Chop a big query into pieces
3. Join decomposition (see the sketch after this list)
· Caching becomes more effective
· Splitting the query up reduces lock contention
· Joining in the application layer makes it easier to shard the database and to scale
· The individual queries themselves often get faster
· Redundant row reads are reduced: joining in the application means each row is fetched only once, whereas
a join inside the database may access parts of the data repeatedly, so this refactoring can also cut
network and memory usage
· Taken further, this amounts to a hash join implemented in the application instead of MySQL's nested-loop join
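A minimal sketch of join decomposition (the classic tag/post example; all table, column and id values are illustrative):
-- one join inside MySQL:
SELECT * FROM tag
  JOIN tag_post ON tag_post.tag_id = tag.id
  JOIN post ON tag_post.post_id = post.id
WHERE tag.tag = 'mysql';
-- decomposed; the application stitches the three result sets together:
SELECT * FROM tag WHERE tag = 'mysql';
SELECT * FROM tag_post WHERE tag_id = 1234;
SELECT * FROM post WHERE id IN (123, 456, 567);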
Forcing and forbidding index use in MySQL
select * from table force index(PRI) limit 2; (force use of the primary key)
select * from table ignore index(PRI) limit 2; (forbid use of the primary key)
--------------------------
Optimizing LIKE '%...%' pattern matching
create table ch2 (id int,name varchar(20));
insert into ch2 values (1,'chenhao'),(2,'wangdan'),(3,'sunwukong'),(4,'rulai');
mysql> explain select id from ch2 where name like '%chenhao%';
+----+-------------+-------+------+---------------+------+---------+------+------+-------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------+---------------+------+---------+------+------+-------------+
| 1 | SIMPLE | ch2 | ALL | NULL | NULL | NULL | NULL | 4 | Using where |
+----+-------------+-------+------+---------------+------+---------+------+------+-------------+
1 row in set (0.00 sec)
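Note: the covering-index EXPLAINs below assume an index on name that the notes above never show being created, e.g.:
create index idx_ch2_name on ch2(name);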
Optimizing via a covering index:
mysql> explain select name from ch2 where name like '%chenhao%';
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
| 1 | SIMPLE | ch2 | index | NULL | idx_ch2_name | 63 | NULL | 4 | Using where; Using index |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
explain select a.id,a.name from ch2 a join
(select name from ch2 where name like '%chenhao%') b
on a.name=b.name;
mysql> explain select count(*) from ch2 where name like '%chenhao%';
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
| 1 | SIMPLE | ch2 | index | NULL | idx_ch2_name | 63 | NULL | 4 | Using where; Using index |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+--------------------------+
Optimizing LIMIT pagination
explain select * from test order by id limit 7000,10;
mysql> explain select * from test order by id limit 7000,10;
+----+-------------+-------+------+---------------+------+---------+------+------+----------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------+---------------+------+---------+------+------+----------------+
| 1 | SIMPLE | test | ALL | NULL | NULL | NULL | NULL | 7952 | Using filesort |
+----+-------------+-------+------+---------------+------+---------+------+------+----------------+
All rows are sorted, then 10 rows are taken
After optimization:
explain select * from test where id>=7000 order by id limit 10;
+----+-------------+-------+-------+---------------+----------+---------+------+------+-------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+----------+---------+------+------+-------------+
| 1 | SIMPLE | test | index | id_index | id_index | 62 | NULL | 10 | Using where |
+----+-------------+-------+-------+---------------+----------+---------+------+------+-------------+
Only ROWS=10 are processed, and the index is used
mysql> select count(distinct name) from ch2;
+----------------------+
| count(distinct name) |
+----------------------+
| 4 |
+----------------------+
Optimizing count(distinct n):
mysql> explain select count(distinct name) from ch2;
+----+-------------+-------+-------+---------------+--------------+---------+------+------+-------------------------------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+-------------------------------------+
| 1 | SIMPLE | ch2 | range | NULL | idx_ch2_name | 63 | NULL | 7 | Using index for group-by (scanning) |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+-------------------------------------+
Optimization:
select count(*) from (select distinct name from ch2) tmp;
mysql> explain select count(*) from (select distinct name from ch2) tmp;
+----+-------------+-------+-------+---------------+--------------+---------+------+------+------------------------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+------------------------------+
| 1 | PRIMARY | NULL | NULL | NULL | NULL | NULL | NULL | NULL | Select tables optimized away |
| 2 | DERIVED | ch2 | index | NULL | idx_ch2_name | 63 | NULL | 6 | Using index |
+----+-------------+-------+-------+---------------+--------------+---------+------+------+------------------------------+
Optimizing OR:
create table ch3 (id int,name varchar(20),age int);
insert into ch3 values (1,'chenhao',30),(2,'wangdan',30),(3,'sunwukong',100000),(4,'rulai',999999999);
create index idx_ch3_name on ch3(name);
create index idx_ch3_age on ch3(age);
select * from ch3 where name='chenhao' or age=999999999;
mysql> explain select * from ch3 where name='chenhao' or age=999999999;
+----+-------------+-------+------+--------------------------+------+---------+------+------+-------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+-------------+-------+------+--------------------------+------+---------+------+------+-------------+
| 1 | SIMPLE | ch3 | ALL | idx_ch3_name,idx_ch3_age | NULL | NULL | NULL | 4 | Using where |
+----+-------------+-------+------+--------------------------+------+---------+------+------+-------------+
Optimization:
explain select * from ch3 where name='chenhao'
union all
select * from ch3 where age=999999999;
+----+--------------+------------+------+---------------+--------------+---------+-------+------+-------------+
| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
+----+--------------+------------+------+---------------+--------------+---------+-------+------+-------------+
| 1 | PRIMARY | ch3 | ref | idx_ch3_name | idx_ch3_name | 63 | const | 1 | Using where |
| 2 | UNION | ch3 | ref | idx_ch3_age | idx_ch3_age | 5 | const | 1 | Using where |
| NULL | UNION RESULT | <union1,2> | ALL | NULL | NULL | NULL | NULL | NULL | |
+----+--------------+------------+------+---------------+--------------+---------+-------+------+-------------+
----------------------
mydumper
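mydumper/myloader do multi-threaded logical dumps and restores; a sketch of typical usage (credentials, paths and the database name are placeholders):
mydumper --user=root --password=123456 --outputdir=/backup/dump --threads=4 --database=orders
myloader --user=root --password=123456 --directory=/backup/dump --threads=4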
---------------------
http://www.cnblogs.com/xuanzhi201111/p/4172604.html
Setting up the MySQL MTOP monitoring system
yum -y install ntp vim-enhanced gcc gcc-c++ flex bison autoconf automake bzip2-devel
yum -y install ncurses-devel zlib-devel libjpeg-devel libpng-devel libtiff-devel freetype-devel libXpm-devel
yum -y install gettext-devel pam-devel libtool libtool-ltdl openssl openssl-devel fontconfig-devel
yum -y install libxml2-devel curl-devel libicu libicu-devel libmcrypt libmcrypt-devel libmhash libmhash-devel
----------------------
Real-time MySQL stats script: computes QPS, TPS, thread connection counts, etc.
qtps.sh
#!/bin/bash
mysqladmin -uroot -p'123123' extended-status -i1|awk 'BEGIN{local_switch=0;print "QPS Commit Rollback TPS Threads_con Threads_run \n------------------------------------------------------- "}
$2 ~ /Queries$/ {q=$4-lq;lq=$4;}
$2 ~ /Com_commit$/ {c=$4-lc;lc=$4;}
$2 ~ /Com_rollback$/ {r=$4-lr;lr=$4;}
$2 ~ /Threads_connected$/ {tc=$4;}
$2 ~ /Threads_running$/ {tr=$4;
if(local_switch==0)
{local_switch=1; count=0}
else {
if(count>10)
{count=0;print "------------------------------------------------------- \nQPS Commit Rollback TPS Threads_con Threads_run \n------------------------------------------------------- ";}
else{
count+=1;
printf "%-6d %-8d %-7d %-8d %-10d %d \n", q,c,r,c+r,tc,tr;
}
}
}'
sh qtps.sh 2 >1.log &
sh qtps.sh |tee 2.log &
20.114,10.23,10.87,20.109,20.118,20.120,20.127,20.138,20.144,20.149,20.153,20.76,
Script to collect MySQL metrics (QPS, TPS, IOPS)
Delete matching lines:
sed /'-------------------------------------------------------'/d 10_87.log.bak
sed /'QPS'/d 10_87.log.bak
Print columns 1 and 4 of a file:
awk '{print $1,$4}' 20_23_3.log
awk '{print $1}' 20_109_3.log >20_109_3_1.log
awk '{print $4}' 20_109_3.log >20_109_3_4.log
awk '{print $1}' 20_76_3.log >20_76_3_1.log
awk '{print $4}' 20_76_3.log >20_76_3_4.log
-----------------------
Key MySQL 5.5 parameters
1.1 InnoDB plugin
20.109:mysql 5.5
select @@version;
select @@innodb_version;
innodb_file_per_table=1
innodb_file_format=barracuda
innodb_strict_mode=1
show variables like 'innodb_file_%';
innodb_file_format=Antelope
show variables like 'innodb_strict_mode';
In 5.1 the InnoDB file format is Antelope; in MySQL 5.5 change it to Barracuda, which supports compression.
truncate table is faster than before, and Barracuda is backward-compatible with Antelope
For Barracuda, two options must be changed:
innodb_file_format = "Barracuda"
innodb_file_format_max = "Barracuda"
Official InnoDB parameters, for reference:
--innodb_purge_threads=1
--innodb_file_format=barracuda
--innodb-buffer-pool-size=8192M
--innodb_support_xa=FALSE
--innodb_flush_method=O_DIRECT
--innodb-flush-log-at-trx-commit=2
--innodb-log-file-size=2000M
--innodb-log-buffer-size=64M
--innodb-io-capacity=200
--skip-innodb-adaptive-hash-index
--innodb-read-io-threads=8
--innodb-write-io-threads=8
--innodb_change_buffering=all
--innodb_stats_on_metadata=off
--innodb-buffer-pool-instances=12
--skip-grant-tables
--max_tmp_tables=100
--query_cache_size=0
--query_cache_type=0
1.1.2 Make full use of the CPU
If your machine has 2 CPUs of 8 cores each, you can set:
innodb_read_io_threads = 8
innodb_write_io_threads = 8
show engine innodb status\G;
# number of physical CPUs
cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l
2
# number of logical CPUs
cat /proc/cpuinfo| grep "processor"| wc -l
16
# cores per physical CPU
cat /proc/cpuinfo| grep "cpu cores"| uniq
8
1.1.3 Raise the number of dirty pages flushed and insert-buffer merges per batch to improve disk IO throughput
Suggested innodb_io_capacity values:
200 single SAS/SATA disk
2000 SAS*12 RAID10
5000 SSD
50000 FUSION-IO
Note: this parameter can be changed dynamically, but requires the SUPER privilege.
SET GLOBAL innodb_io_capacity = 2000;
show variables like 'innodb_io_capacity';
When flushing dirty pages from the buffer pool, innodb_io_capacity pages are flushed per batch;
when merging the insert buffer, the merge batch is 5% of innodb_io_capacity
1.1.5 Keep hot data alive longer in the Innodb_Buffer_Pool
http://www.orczhou.com/index.php/2010/05/innodb-plugin-make-buffer-cache-scan-resistant/
show variables like 'innodb_old_blocks%';
| innodb_old_blocks_pct | 37 |
| innodb_old_blocks_time | 0 |
Use show engine innodb status\G; to view the current state
1) When a block has to be read from a data file, it is first placed at the head of the old sublist (the midpoint). If the block is then accessed, it moves to the head of the new sublist.
Normally a block is read right after being fetched, so it quickly lands at the head of the new sublist. The bad case is something like mysqldump touching every block, which pushes them all into the new sublist and pollutes the whole Buffer Pool.
Weaknesses and details of the Buffer Pool LRU
The weakness of a plain LRU is that a single query doing a full table scan (a backup, ad-hoc DDL, and the like) may replace every block on the Buffer Pool's LRU list, hot data included, with blocks that may never be read again after that one query. This is what we call the Buffer Pool being "polluted".
InnoDB introduces mechanisms to avoid this. The algorithm is still LRU, but with a midpoint strategy added (similar to the midpoint strategy in MyISAM's key buffer), together with the innodb_old_blocks_time parameter to keep the Buffer Pool clean.
The data on the LRU list is split in two: the sublist of new blocks and the sublist of old blocks. The latter holds the blocks accessed least recently (the further back in the list, the longer since the last access). By default the former takes 63% (5/8) and the latter 37% (3/8).
2) How the Buffer Pool's LRU queue resists pollution
The InnoDB Plugin introduces the following parameters to prevent "pollution":
innodb_old_blocks_pct: controls the length of the old sublist within the LRU queue
innodb_old_blocks_time: after a block is inserted at the midpoint (old sublist), it must stay in the old sublist for more than innodb_old_blocks_time (ms) before it can be moved to the new sublist. For example, set innodb_old_blocks_time to 1000 (1s): when a table scan occurs, InnoDB loads the blocks at the midpoint (old sublist) and the program reads them immediately, but since the blocks have been in the old sublist for less than innodb_old_blocks_time, they are not moved to the new sublist. That avoids polluting the Buffer Pool. Even better, the parameter is dynamic, so for an unplanned table scan you can simply adjust it on the fly.
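A sketch of shielding the buffer pool around a planned full scan (value as in the example above):
SET GLOBAL innodb_old_blocks_time = 1000;  -- pages must sit 1s in the old sublist before promotion
-- ... run the mysqldump / table scan ...
SET GLOBAL innodb_old_blocks_time = 0;     -- restore the default shown above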
3) Watching how well pollution is prevented
Check the Buffer Pool section of SHOW INNODB STATUS\G for the relevant efficiency numbers:
Database pages 618464
Old database pages 228280
0.00 youngs/s, 0.00 non-youngs/s
Pages made young 3342076, not young 0
Buffer pool hit rate 1000 / 1000, young-making rate 0 / 1000 not 0 / 1000
Old database pages: how many pages sit in the old region
Pages made young: how many pages have been moved into the new region
Pages made not young: how many pages have not been moved into the new region
youngs/s: pages moved into the new region per second
non-youngs/s: pages not moved into the new region per second
young-making rate: proportion of accesses that moved a page into the new region
young-making not rate: proportion that did not
Buffer pool hit rate 1000 / 1000: the buffer hit rate is 100%; normally it should not fall below 95%, and if it does, check whether full table scans are polluting the LRU list
young-making rate 0 / 1000: no hot-data replacement caused by full table scans
not 0 / 1000: data accesses all land in the hot (new) region of memory
Buffer Pool LRU List
The InnoDB Buffer Pool manages page replacement with an LRU algorithm. The LRU list is divided by function into LRU_young and LRU_old; by default LRU_old is 3/8 of the list length. When a page is read in (get/read ahead), it is first linked at the head of LRU_old. On the page's first access (read/write), it moves from LRU_old to the head of LRU_young (the head of the whole LRU list).
All pages of a full table scan follow the same read-into-LRU_old-then-move-to-LRU_young rule, which can push the other pages in the Buffer Pool out of memory. To guard against the downside of full table scans, InnoDB provides the system parameter innodb_old_blocks_time: a page moves to the LRU head only when the interval between its first access and a subsequent access exceeds this value. innodb_old_blocks_time was introduced in 5.1.41. It defaults to 0, meaning full-scan pages go straight into LRU_young (the list head), so a full scan of a big table evicts a lot of pages.
1. If the buffer pool has not evicted any page, it is big enough: the pages fit entirely in memory, so there is no need to move a page from LRU_old to LRU_head
2. If this is not the page's first access and the user has set innodb_old_blocks_time, check whether the interval since the first access exceeds the parameter; if not, this access does not move the page from LRU_old to LRU_head
1.1.7 InnoDB supports multiple buffer pool instances
In MySQL 5.5 you can raise the innodb_buffer_pool_instances parameter to create several InnoDB_Buffer_Pool instances. A hash function spreads the cached pages across the pools, and each instance manages its own free list, flush list and LRU, which relieves contention on a single pool.
Note: innodb_buffer_pool_size must be larger than 1G for multiple InnoDB_Buffer_Pool instances to take effect; up to 64 instances are supported.
show variables like 'innodb_buffer_pool_instances';
1.1.8 The adaptive hash index can be switched off
Monitor the SEMAPHORES section of show engine innodb status\G; if you see many waits there, switch the feature off to improve system performance
SEMAPHORES
----------
OS WAIT ARRAY INFO: reservation count 21266030, signal count 308043580
Mutex spin waits 2212094935, rounds 10517266421, OS waits 9372536
RW-shared spins 143005995, rounds 598178321, OS waits 4464557
RW-excl spins 186178811, rounds 1374047634, OS waits 2024816
Spin rounds per wait: 4.75 mutex, 4.18 RW-shared, 7.38 RW-excl
-------------------------------------
INSERT BUFFER AND ADAPTIVE HASH INDEX
-------------------------------------
Hash table size 21249841, node heap has 36864 buffer(s)
21971.50 hash searches/s, 363.72 non-hash searches/s
Ratio of hash to non-hash searches: 21971/363 ≈ 60, so no need to switch it off here
1.1.10 innodb_thread_concurrency
Note: when innodb_thread_concurrency = 0, the innodb_thread_sleep_delay parameter has no effect, and innodb_concurrency_tickets is likewise meaningless. 0 is the recommended setting here: it lets multi-core CPUs do their work and raises concurrency.
How MySQL calls into the storage engine:
read_row
There are several methods like this, e.g. read by index (ordered read), sequential read, random read
write_row;
update_row;
delete_row; see the source code for the exact functions.
To execute these methods (functions), InnoDB checks its internal thread count; if it exceeds innodb_thread_concurrency, the thread waits the number of microseconds set by innodb_thread_sleep_delay and tries to enter again. If the second attempt still fails, it enters the sleeping thread queue (FIFO).
show variables like 'innodb_thread_concurrency';
innodb_thread_concurrency=8: set it to the number of CPUs in your server; the default, normally 8, is recommended
show engine innodb status\G;
If the waiting in InnoDB queue state shows up frequently, database concurrency is too high: analyze what is driving it, or add database servers (assuming you computed the value precisely from the hardware).
InnoDB limits the number of concurrently running threads to innodb_thread_concurrency. A thread not currently running inside the InnoDB kernel may be in the state "waiting in InnoDB queue" or "sleeping before joining InnoDB queue". The latter is interesting: to avoid too many threads fighting to enter the run queue at once, InnoDB tries to put threads to sleep first when there are not enough free slots.
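The variable is dynamic, so the recommended "unlimited" setting can be applied without a restart:
SET GLOBAL innodb_thread_concurrency = 0;  -- 0 = no InnoDB-side thread throttling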
1.1.11 Changes to the read-ahead algorithms
InnoDB has two read-ahead algorithms to improve I/O performance: linear read-ahead and random read-ahead.
Linear read-ahead: when the number of pages read sequentially from an extent (64 pages) reaches innodb_read_ahead_threshold, InnoDB triggers an asynchronous request that prefetches the following extent into the buffer pool. In MySQL 5.1.x, InnoDB decided whether to prefetch the next extent only when reading the last page of the current extent.
Random read-ahead: when several pages of the same extent are found in the Innodb_Buffer_Pool, an asynchronous request prefetches the remaining pages of that extent. Random read-ahead added unnecessary complexity and often degraded performance, so it was removed in MySQL 5.5.x.
innodb_read_ahead_threshold defaults to 56 and can be changed dynamically. Use show engine innodb status\G to check the current behavior:
Pages read ahead: how many pages are prefetched per second
evicted without access: how many prefetched pages are evicted per second without ever being read.
Lots of evicted without access means the value you set is too low and should be raised
Pages read ahead 0.00/s, evicted without access 0.00/s, Random read ahead 0.00/s
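The same counters are also exposed as status variables; a sketch of checking them and then loosening the threshold (64 is the maximum):
SHOW GLOBAL STATUS LIKE 'Innodb_buffer_pool_read_ahead%';
SET GLOBAL innodb_read_ahead_threshold = 64;  -- prefetch only after a full sequential extent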
1.1.15 Improved purge scheduling
Starting with MySQL 5.5.x, purge can run in a separate thread and supports more concurrency. The innodb_purge_threads parameter selects whether purge uses its own thread: the default 0 means no separate thread, and 1 means a dedicated purge thread.
This parameter generally needs no tuning; the default is fine.
1.1.16 Added delete buffering and purge buffering
1.1.20 InnoDB metadata statistics updates can be switched off dynamically
What innodb_stats_on_metadata does: every time you query tables in the information_schema metadata database, InnoDB also randomly samples index pages of tables in the other databases to refresh the information_schema.STATISTICS table before returning your result. When tables are big and numerous this takes a long time, and lots of rarely-accessed data gets pulled into the Innodb_Buffer_Pool, polluting the cache. ANALYZE TABLE and SHOW TABLE STATUS statements also make InnoDB sample data randomly.
Starting with MySQL 5.5.x you can switch innodb_stats_on_metadata off dynamically; it defaults to on.
set global innodb_stats_on_metadata = OFF;
Some will ask: if I turn this off, will the statistics become imprecise?
The answer is no. A quick check to confirm:
mysql> show variables like 'innodb_stats%';
+---------------------------+-------------+
| Variable_name | Value |
+---------------------------+-------------+
| innodb_stats_method | nulls_equal |
| innodb_stats_on_metadata | OFF |
1.2.2 relay-log self-healing
Starting with MySQL 5.5.x there is a relay_log_recovery parameter. Its effect: if a slave crashes and the relay log is damaged so that part of it was never applied, the slave automatically abandons all unexecuted relay logs and fetches the log again from the MASTER, which guarantees the relay log's integrity
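relay_log_recovery only takes effect at server startup, so it belongs in the slave's my.cnf (a sketch; automatic relay-log purging, on by default, should stay enabled so the discarded logs are cleaned up):
[mysqld]
relay_log_recovery = 1
relay_log_purge = 1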
---------------------
converting HEAP to MyISAM
The thread is converting an internal temporary table from an in-memory MEMORY (HEAP) table to an on-disk MyISAM table.
The state "converting HEAP to MyISAM" happens when a query that needs a temporary table is converting from an in-memory temporary table
to a disk-based temporary table.
MySQL uses memory-based temporary tables up to the size limit set by the tmp_table_size system variable. If a query needs a temporary table larger than
this it will be converted to a disk-based temporary table using the MyISAM storage engine.
GROUP BY queries and ORDER BY queries that can't use an index for the ordering are the most common causes of temporary table creation.
Solution
You could consider raising the per-session value of tmp_table_size if you have sufficient memory. Use the SHOW GLOBAL STATUS statement
to see the value of the Created_tmp_tables variable. It will show the total number of temporary tables that have been created.
Tune tmp_table_size and max_heap_table_size so that large query result sets do not have to be spilled to disk
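The effective in-memory limit is the smaller of the two, so raise them together; a per-session sketch with an illustrative 256M:
SET SESSION tmp_table_size = 268435456;       -- 256M
SET SESSION max_heap_table_size = 268435456;  -- 256M; MIN(these two) is what counts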
use orders;
EXPLAIN SELECT * FROM (
SELECT
a.paymentFlow_no 'paymentFlowNo',
DATE_FORMAT(a.add_time, '%Y-%m-%d %H:%i:%s') 'payTime',
DATE_FORMAT(c.add_time, '%Y-%m-%d %H:%i:%s') 'orderTime',
ROUND(a.r_final_amount/100, 2) 'payAmount',
ROUND(a.r_credits_fee/100, 2) 'creditsFee',
ROUND(c.r_total_amount/100, 2) 'totalAmount',
ROUND(c.settled_amount/100, 2) 'settleAmount',
ROUND(c.r_commission_fee/100, 2) 'commissionAmount',
ROUND(c.r_got_credits/100, 2) 'gotCredits',
'全款/尾款' AS 'paymentType',
c.id 'orderId',
c.orderGroup_id 'groupNo',
IF(c.attribute = 2, '否', '是') 'isTgOrder',
IF(c.processStatus_id = 20, '待支付订金',
IF(c.processStatus_id = 21, '待填写支付金额',
IF(c.processStatus_id = 41, '待支付',
IF(c.processStatus_id = 51, '待发货',
IF(c.processStatus_id = 71 OR c.processStatus_id = 81, '待收货确认',
IF(c.processStatus_id = 91, '已完成', IF(c.processStatus_id = 100, '已关闭', ''))))))) 'processStatus',
c.shop_id 'shopId',
c.shop_name 'shopName',
c.company_id 'companyId',
c.user_id 'userId',
c.user_name 'userName'
FROM payment_flow a LEFT JOIN orderGroup c ON c.paymentFlow_id = a.id
WHERE a.payPlatform_id = 20 AND a.type= 1 AND c.attribute IN (2, 3)
UNION
SELECT
a.paymentFlow_no 'paymentFlowNo',
DATE_FORMAT(a.add_time, '%Y-%m-%d %H:%i:%s') 'payTime',
DATE_FORMAT(c.add_time, '%Y-%m-%d %H:%i:%s') 'orderTime',
ROUND(a.r_final_amount/100, 2) 'payAmount',
ROUND(a.r_credits_fee/100, 2) 'creditsFee',
ROUND(c.r_total_amount/100, 2) 'totalAmount',
ROUND(c.settled_amount/100, 2) 'settleAmount',
ROUND(c.r_commission_fee/100, 2) 'commissionAmount',
ROUND(c.r_got_credits/100, 2) 'gotCredits',
'订金' AS 'paymentType',
c.id 'orderId',
c.orderGroup_id 'groupNo',
IF(c.attribute = 2, '否', '是') 'isTgOrder',
IF(c.processStatus_id = 20, '待支付订金',
IF(c.processStatus_id = 21, '待填写支付金额',
IF(c.processStatus_id = 41, '待支付',
IF(c.processStatus_id = 51, '待发货',
IF(c.processStatus_id = 71 OR c.processStatus_id = 81, '待收货确认',
IF(c.processStatus_id = 91, '已完成', IF(c.processStatus_id = 100, '已关闭', ''))))))) 'processStatus',
c.shop_id 'shopId',
c.shop_name 'shopName',
c.company_id 'companyId',
c.user_id 'userId',
c.user_name 'userName'
FROM payment_flow a LEFT JOIN new_preOrder b ON b.paymentFlow_id = a.id LEFT JOIN orderGroup c ON c.id = b.orderGroup_id
WHERE a.payPlatform_id = 20 AND a.type= 2 AND c.attribute IN (2, 3)
) a ORDER BY payTime
LIMIT 0,15;
------------------
03 02 * * * cd /home/xubo/pythontools && /usr/bin/python mysql_slow_log.py >/tmp/mysql_slow_log.log
*/1 04,05 * * * cd /tmp && /usr/bin/python tmp.py
*/5 * * * * cd /home/xubo/pythontools && /usr/bin/python mysql_log.py
#*/2 * * * * cd /home/xubo/pythontools && /usr/bin/python check_tb_frequency.py
#01 02 * * * cd /home/scrips && /bin/bash mysql_dump_db.sh
-----------------------
set session profiling=1;
select * from test where id>'1' and id <'100';
show profiles;
show profile for query 1;
SELECT * from wallet_log_201506_24 where log_file like '%cash_create%';
-----------------------
Benchmark script:
for size in 100
do
cd /mnt/stec
sysbench --test=fileio --file-num=1 --file-total-size=${size}G prepare
sync
echo 3 > /proc/sys/vm/drop_caches
for numthreads in 4
do
sysbench --test=fileio --file-total-size=${size}G --file-test-mode=rndwr --max-time=3600 --max-requests=0 --num-threads=$numthreads --rand-init=on --file-num=1 --file-extra-flags=direct --file-fsync-freq=0 --file-io-mode=sync --file-block-size=16384 --report-interval=10 run | tee -a run$size.thr$numthreads.txt
done
done
----------------------
Linux: how to quickly extract a file from a tar archive
tar tvf mysql.tar |grep -i "my.cnf"
tar xvf mysql.tar mysql/my.cnf
----------------------
Keepalived configuration
http://www.linuxde.net/2014/12/15647.html
Install keepalived (on both machines):
yum -y install keepalived
chkconfig keepalived on
Configure keepalived (server1):
vi /etc/keepalived/keepalived.conf
vi /etc/keepalived/keepalived.conf
vrrp_sync_group VG_1 {
group {
inside_network
outside_network
}
}
vrrp_instance inside_network {
state BACKUP
interface eth1
virtual_router_id 51
priority 101
advert_int 1
authentication {
auth_type PASS
auth_pass 3489
}
virtual_ipaddress {
192.168.3.150/24
}
nopreempt
}
vrrp_instance outside_network {
state BACKUP
interface eth0
virtual_router_id 50
priority 101
advert_int 1
authentication {
auth_type PASS
auth_pass 3489
}
virtual_ipaddress {
10.96.153.239/24
}
nopreempt
}
Configure keepalived (server2):
vrrp_sync_group VG_1 {
group {
inside_network
outside_network
}
}
vrrp_instance inside_network {
state BACKUP
interface eth1
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 3489
}
virtual_ipaddress {
192.168.3.150
}
notify_master /data/sh/mysqlfailover.sh
}
vrrp_instance outside_network {
state BACKUP
interface eth0
virtual_router_id 50
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 3489
}
virtual_ipaddress {
10.96.153.239/24
}
}
Both servers set state BACKUP. server1 adds the nopreempt option and its priority is higher than server2's, so when server1 comes back from a crash it does not grab the VIP back;
server2 sets notify_master /data/sh/mysqlfailover.sh, meaning that once server2 takes over from server1 it runs this script to promote server2's MySQL to master (sketched below)
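The notes do not include mysqlfailover.sh itself; a minimal hypothetical sketch of what such a notify_master hook could do (credentials are placeholders; a real script should first verify the slave has applied all relayed events):
#!/bin/bash
# promote the local MySQL when keepalived makes this node MASTER
mysql -uroot -p123456 -e "STOP SLAVE; SET GLOBAL read_only = OFF;"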
----
10.10.20.105 mongodb database: the user table of the huoban db; 2014 data for 13501765097
2014-01-01 00:00:00
1388505600
2014-12-31 23:59:59
1420257599
db.user.findOne()
e.g. querying for values between two bounds:
$gt: greater than
$lt: less than
$gte: greater than or equal to
$lte: less than or equal to
db.user.find({ "register_date" : { $gte: 1388505600, $lte: 1420257599 } } ); // value1 <= field <= value2
db.user.find({"mobile" : "15618527360"});
db.user.find({"jia_user_id" : "15618527360"});
db.user.find({"user_name" : "Emily2014"});
Emily2014
/root/mysql-proxy-0.8.5-linux-glibc2.3-x86-64bit/libexec/mysql-proxy --proxy-backend-addresses=10.10.20.155:3311 --proxy-lua-script=/root/mysql-proxy-0.8.5-linux-glibc2.3-x86-64bit/logquery.lua --proxy-address=:23311
-----------------
The most frequent statements:
SELECT digest_text, count_star FROM performance_schema.events_statements_summary_by_digest
ORDER BY count_star DESC;
Top 5 wait events by reads and writes:
select event_name,count_read 'reads',count_write 'write',count_misc misc,
(count_misc+count_write+count_read) as sum_io from file_summary_by_event_name order by 5 desc limit 5;
SELECT event_name,count_star,total_latency,total_read,total_written, total_requested FROM io_global_by_wait_by_bytes LIMIT 10;
Top 10 files by IO:
SELECT * FROM io_global_by_file_by_bytes LIMIT 10;
use performance_schema
select thread_id,name,type from threads;
--------------------------
Check whether your parameter settings are sane
wget http://pkgs.repoforge.org/perl-TermReadKey/perl-TermReadKey-2.30-1.el5.rf.x86_64.rpm
pt-variable-advisor --user=root --password=123456 --source-of-variables vars.txt
pt-variable-advisor --user=root --password=123456 localhost --source-of-variables vars.txt
------------------
select @@profiling;
set profiling=1;
select * from t1;
show profiles;
show profile for query 1;
The PROFILE feature works only at the SESSION level.
Most of the time two columns of the PROFILE output matter: Status and Duration. The former is the state (largely the same states PROCESSLIST shows), the latter is the time spent in that state. So the main question is which state took the longest, and which of those states can still be optimized.
Status / Advice
System lock: work out which lock caused it; usually MySQL- or InnoDB-kernel-level locks. Advice: only dig in if the time is large; usually it is fine
Sending data: sending data from the server to the client, possibly after receiving it from the storage engine; seen often when the data volume is large
Note: Sending data is not network transmission; it is reading from disk. Sending over the network shows up as Writing to net
Advice: use an index or add LIMIT to cut the amount of data scanned and sent to the client
Sorting result: sorting the result, similar to Creating sort index but on a regular table rather than in an in-memory table
Advice: create suitable indexes
Table lock: a table-level lock; either MyISAM's engine-level table locks, or an explicit LOCK TABLES
Creating sort index: the current SELECT needs a temporary table for its ORDER BY
Advice: create suitable indexes
checking query cache for query / checking privileges on cached / sending cached result to client / storing result in query cache:
query-cache-related states; turning the query cache off has already been strongly recommended, many times
explain select * from t1;
First look at the type column: ALL means a full table scan is expected, which is usually expensive; create suitable indexes so the lookup avoids the full scan. Note that a full index scan (type index) can cost even more than a full table scan, unless it is a scan of an InnoDB table's primary key.
Then look at the Extra column; Using temporary and Using filesort deserve extra attention:
Using temporary: a temporary table is needed, usually because the GROUP BY columns are not indexed, or because GROUP BY and ORDER BY use different columns; add suitable indexes.
Using filesort: the sort cannot be done via an index, possibly because in a multi-table join the sort column does not belong to the driving table; add suitable indexes.
Using where: usually a full table or full index scan (type ALL or index) combined with a WHERE filter; add suitable indexes.
Those are the ones that come to mind; more may be added later.
Other states such as Using index, Using index condition, Using index for group-by are fine; no need to worry.
show global variables like '%binlog_row_image%';
set global binlog_row_image=minimal;
binlog_row_image in MySQL 5.6:
minimal: the binlog records only the columns being modified;
noblob: on top of minimal, non-LOB columns are also written to the binlog;
full (the default): consistent with the 5.5/5.1 log format; the binlog records every column of the row;
so setting binlog_row_image=minimal in 5.6 greatly shrinks the binlog and therefore the space used.
---------------
Breaking down what ibdata1 is made of
The community provides a tool, innodb_space, that gives a clean breakdown of ibdata1 (it needs the bindata gem):
https://rubygems.org/gems/innodb_ruby
http://de.planet.mysql.com/entry/?id=35706
Installing innodb_ruby
gem install innodb_ruby
gem install bindata
innodb_space -f /tmp/ibdata1 space-page-type-summary
type count percent description
UNDO_LOG 4430725 80.61 Undo log
ALLOCATED 1035701 18.84 Freshly allocated
INODE 28348 0.52 File segment inode
INDEX 722 0.01 B+Tree index
IBUF_BITMAP 334 0.01 Insert buffer bitmap
XDES 333 0.01 Extent descriptor
IBUF_FREE_LIST 152 0.00 Insert buffer free list
SYS 3 0.00 System internal
TRX_SYS 1 0.00 Transaction system header
FSP_HDR 1 0.00 File space header
You can see that ibdata1 is mostly undo log. Having identified what the file contains, the options are:
advise upgrading from 5.1 to 5.5: 5.5 has a dedicated purge thread that reclaims undo log quickly, and since the migration is logical, ibdata1 is rebuilt and shrinks;
in 5.6, undo tablespaces can be configured separately so the undo log no longer shares ibdata1.
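A sketch of the 5.6 my.cnf settings for separate undo tablespaces (they only take effect when a new instance is initialized; values are illustrative):
[mysqld]
innodb_undo_directory = /data/undo
innodb_undo_tablespaces = 3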
-----------
Group commit:
New parameters
MySQL 5.6 provides two parameters to control binlog group commit:
http://www.bitscn.com/pdb/mysql/201407/226226.html
binlog_max_flush_queue_time
In microseconds; the timeout for pulling transactions off the flush queue, mainly to stop the response time of some transactions from climbing when concurrency is very high.
Read MYSQL_BIN_LOG::process_flush_stage_queue to understand how it works
binlog_order_commits
show variables like 'binlog_max_flush_queue_time';
show variables like 'binlog_order_commits';
Installing Percona Server 5.6.24
Via yum:
yum install http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm
yum list | grep percona
yum install Percona-Server-client-56 Percona-Server-server-56
or:
wget -r -l 1 -nd -A rpm -R "*devel*,*debuginfo*" \
http://www.percona.com/downloads/Percona-Server-5.6/LATEST/RPM/rhel6/x86_64/
rpm -ivh Percona-Server-shared-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm \
Percona-Server-client-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm \
Percona-Server-server-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm
rpm -ivh Percona-Server-shared-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm
rpm -ivh Percona-Server-client-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm
rpm -ivh Percona-Server-server-56-5.6.6-alpha60.1.285.rhel6.x86_64.rpm
Or build from source:
wget http://www.percona.com/downloads/Percona-Server-5.6/Percona-Server-5.6.23-72.1/binary/tarball/Percona-Server-5.6.23-rel72.1-Linux.x86_64.tar.gz
$ tar xfz Percona-Server-5.6.23-rel72.1-Linux.x86_64.tar.gz
cmake . -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_CONFIG=mysql_release -DFEATURE_SET=community -DWITH_EMBEDDED_SERVER=OFF
make
make install
Or install the binary tarball:
wget http://www.percona.com/downloads/Percona-Server-5.6/Percona-Server-5.6.24-72.2/binary/tarball/Percona-Server-5.6.24-rel72.2-Linux.x86_64.ssl100.tar.gz
tar xfz Percona-Server-5.6.24-rel72.2-Linux.x86_64.ssl100.tar.gz