This document covers the following two topics, for reference:
- Changing the RAC IP addresses
- Replacing the RAC storage (storage replication)
1. Changing the RAC IP addresses and port number
# Register the new interfaces with the cluster, by interface name and IP address
# Check the Grid Infrastructure home
su - grid
echo $ORACLE_HOME/bin
# the ./oifcfg and ./crsctl commands below are run from $ORACLE_HOME/bin

# As root, list the current cluster network configuration
./oifcfg getif

# Work out the network (subnet) address, e.g.
ipcalc -bnm 10.1.1.1 255.255.255.0
# oifcfg expects the network address, not a host address

# Add the new interfaces
./oifcfg setif -global bond0/192.168.8.0:public
./oifcfg setif -global bond1/10.1.1.0:cluster_interconnect
./oifcfg getif

# Once the new entries are confirmed, stop the cluster (run on every node)
./crsctl stop crs   #-f

# Adjust the network and update /etc/hosts on every node
vi /etc/hosts

# Start the cluster
./crsctl start crs

# Remove the obsolete network entries
./oifcfg getif
./oifcfg delif -global eth0/192.168.1.0
./oifcfg delif -global eth1/10.1.1.0

# Check the cluster status
su - grid
crsctl stat res -t

# Change the listener port, or create a listener
netca
# or change the cluster configuration from the command line:
srvctl config scan_listener
srvctl modify scan_listener -p 11521

# Update the VIPs
srvctl modify nodeapps -n rac01 -A rac01-vip/255.255.255.0/bond0
srvctl modify nodeapps -n rac02 -A rac02-vip/255.255.255.0/bond0

# Point the ASM and database instances at the new listener port
show parameter listener
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.8.53)(PORT = 11521))' sid='+ASM1';
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.8.54)(PORT = 11521))' sid='+ASM2';
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.8.53)(PORT = 11521))' sid='racdb1';
alter system set local_listener='(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.8.54)(PORT = 11521))' sid='racdb2';
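A minimal verification pass after the change, offered as a sketch: it assumes the new SCAN listener port 11521 and the interface, node, and VIP names used in the examples above.

su - grid
oifcfg getif                    # only the bond0/bond1 entries should remain
srvctl config scan_listener     # should report TCP port 11521
srvctl config nodeapps          # should show the new VIP definitions
srvctl status scan_listener
srvctl status nodeapps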
2. Replacing the storage (storage replication)
2.1 Confirm the disks and set up multipath binding
# Check the disks as required and bind them with multipath
# Multipath command reference
yum install device-mapper-multipath*
modprobe dm-multipath           # load the module
modprobe dm-round-robin         # load the module
service multipathd restart      # restart the service
multipath -F                    # flush the existing multipath maps
multipath -v2                   # rebuild the multipath maps (verbose)
multipath -ll                   # list the multipath devices

# Multipath configuration file, for reference
multipath -ll                   # use the WWIDs shown here in the configuration below
# cat /etc/multipath.conf
defaults {
    polling_interval        5
    path_grouping_policy    multibus
    prio                    const
    path_checker            directio
    rr_min_io               1000
    rr_weight               uniform
    failback                manual
    no_path_retry           fail
    user_friendly_names     yes
}
blacklist {
    devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
    devnode "^hd[a-z]"
    devnode "^cciss!c[0-9]d[0-9]*"
}
multipaths {
    multipath {
        wwid    14f504e46494c45525743714d4f6a2d6437466f2d30435362
        alias   fra
        path_grouping_policy    multibus
    }
    multipath {
        wwid    14f504e46494c4552664857314b622d576f63582d6f324743
        alias   data
        path_grouping_policy    multibus
    }
    multipath {
        wwid    14f504e46494c45524f616d324a4f2d463255562d42764747
        alias   ocr1
        path_grouping_policy    multibus
    }
    multipath {
        wwid    14f504e46494c455230336f4738382d684268682d66306a32
        alias   ocr2
        path_grouping_policy    multibus
    }
}

# RHEL 7
systemctl start multipathd.service
systemctl status multipathd.service

# Bind device ownership and permissions for ASM via udev
vi /etc/udev/rules.d/99-oracle-asmdevices.rules
ENV{DM_NAME}=="ocr1", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="ocr2", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="ocr3", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="ocr4", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="ocr5", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="data", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"
ENV{DM_NAME}=="fra", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="mapper/$env{DM_NAME}"

# Apply the rules (RHEL 6)
start_udev
# RHEL 7
/sbin/udevadm control --reload-rules
/sbin/udevadm trigger --type=devices --action=change
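As a quick sanity check after the rules are applied, every ASM device should end up owned by grid:asmadmin with mode 660. A minimal sketch, assuming the device aliases defined above:

# dereference the /dev/mapper symlinks and check owner, group and mode
for d in ocr1 ocr2 data fra; do
    ls -lL /dev/mapper/$d
done
# if a device is still root:disk, reload and re-trigger udev:
/sbin/udevadm control --reload-rules
/sbin/udevadm trigger --type=devices --action=change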
2.2 Reconfigure, or reinstall, the clusterware
# Confirm the disk paths of the original cluster, set the paths according to the old layout,
# and try to start the cluster. If it will not start, try the following.

# To avoid problems with SSH user equivalence, test it first; otherwise re-running root.sh is enough
ssh rac01 date
ssh rac02 date

# Set up SSH user equivalence manually
ssh-keygen -t rsa
ssh-keygen -t dsa
# run in /home/grid/.ssh on the first node, then test
ssh rac01 cat /home/grid/.ssh/id_rsa.pub >> authorized_keys
ssh rac01 cat /home/grid/.ssh/id_dsa.pub >> authorized_keys
ssh rac02 cat /home/grid/.ssh/id_rsa.pub >> authorized_keys
ssh rac02 cat /home/grid/.ssh/id_dsa.pub >> authorized_keys
scp authorized_keys rac02:/home/grid/.ssh/

# Deconfigure the clusterware before re-running root.sh
cd $ORACLE_HOME/crs/install
./rootcrs.pl -verbose -deconfig -force

# Edit the configuration parameter file (GPnP/crsconfig), e.g. disk information and IP addresses
vi crsconfig_params
ASM_DISK_GROUP=ocr
ASM_DISCOVERY_STRING=/u01/asm-disk/*
ASM_DISKS=/u01/asm-disk/ocr1,/u01/asm-disk/ocr2,/u01/asm-disk/ocr3

# Wipe the old OCR/voting disk headers with dd
dd if=/dev/zero of=/dev/mapper/crs1 bs=1024 count=2
dd if=/dev/zero of=/dev/mapper/crs2 bs=1024 count=2
dd if=/dev/zero of=/dev/mapper/crs3 bs=1024 count=2

# Re-run root.sh
cd $ORACLE_HOME
./root.sh

# Afterwards, confirm the listener is up; if there is no listener service, create one
netca

## Alternatively, reinstall the software, matching the existing software version
# Create the users and groups
/usr/sbin/groupadd -g 1000 oinstall
/usr/sbin/groupadd -g 1100 asmadmin
/usr/sbin/groupadd -g 1200 dba
/usr/sbin/groupadd -g 1201 oper
/usr/sbin/groupadd -g 1300 asmdba
/usr/sbin/groupadd -g 1301 asmoper
useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,dba grid
useradd -u 1200 -g oinstall -G dba,oper,asmdba oracle

# Set the user passwords
passwd grid
passwd oracle
echo oracle | passwd oracle --stdin
echo oracle | passwd grid --stdin

# Create the installation directories
mkdir -p /u01/app/11.2.0/grid
chown grid:oinstall /u01/ -R
chmod 775 /u01/ -R
mkdir -p /u02/app/oracle/product/11.2.0/db_home
chown oracle:oinstall /u02/ -R
chmod 775 /u02/ -R

# Environment variables for grid
su - grid
vi .bash_profile
ORACLE_SID=+ASM1; export ORACLE_SID          # +ASM1 / +ASM2 per node
ORACLE_BASE=/u01/app/grid; export ORACLE_BASE
ORACLE_HOME=/u01/app/11.2.0/grid; export ORACLE_HOME
ORACLE_PATH=/u01/app/oracle/common/oracle/sql; export ORACLE_PATH
umask 022
PATH=$ORACLE_HOME/bin:$PATH:$HOME/bin:$ORACLE_PATH
export PATH

# Environment variables for oracle
su - oracle
vi .bash_profile
export ORACLE_SID=racdb1                     # racdb1 / racdb2 per node
export ORACLE_BASE=/u02/app/oracle
export ORACLE_HOME=/u02/app/oracle/product/11.2.0/db_home
PATH=$ORACLE_HOME/bin:$PATH:$HOME/bin
umask 022
export PATH

# Verify the ASM disks and disk groups
kfod asm_diskstring='/dev/mapper/*' disks=all
kfed read /dev/mapper/data1 | grep grpname

# Confirm the permissions on the oracle binary
$GRID_HOME/bin/setasmgidwrap o=/u02/app/oracle/product/11.2.0/db_home/bin/oracle
cd /u02/app/oracle/product/11.2.0/db_home/bin
chmod 6751 oracle
ls -l oracle

# Test the interconnect (heartbeat)
netstat -in
ping -s 1500 -c 2 -I <source IP> <destination IP>
traceroute -s <source IP> -r -F <destination IP> <packet size>

# Mount the data disk group
su - grid
sqlplus / as sysasm
alter diskgroup data mount;

# Locate the database initialization parameter file
asmcmd
lsdg
cd data
ls

# Edit the startup parameter file
su - oracle
cd $ORACLE_HOME/dbs
vi initracdb1.ora
spfile='+data/xxx/parameter/spfile000.ora'

# Start the database
sqlplus / as sysdba
startup

# Register the database and instances with the cluster
su - oracle
srvctl add database -d racdb -o /u02/app/oracle/product/11.2.0/db_home -p +data/xxx/spfile.ora
srvctl add instance -d racdb -i racdb1 -n rac01
srvctl add instance -d racdb -i racdb2 -n rac02

# Check the cluster and listener status, then test a restart
crsctl stat res -t
lsnrctl status
sqlplus / as sysdba
select instance_name,status from gv$instance;
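As a final, optional validation once everything is registered, cluvfy (bundled with the Grid home, which is not otherwise used in this document) can re-check the clusterware stack and shared storage. A sketch assuming the node names rac01/rac02 and the database name racdb used above:

su - grid
# post-installation check of the clusterware stack
$ORACLE_HOME/bin/cluvfy stage -post crsinst -n rac01,rac02 -verbose
# shared storage accessibility from both nodes
$ORACLE_HOME/bin/cluvfy comp ssa -n rac01,rac02 -verbose
# database and instance registration in the OCR
srvctl config database -d racdb
srvctl status database -d racdb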