# Re-point the CRM cluster's interconnect, public network and VIPs.
# CRS_HOME = /array_crm01_ora/oracrm/product/10204/crs

# List all network interfaces the OS reports (name + subnet).
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg iflist
# Show the interface/subnet assignments currently stored in the OCR.
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg getif
# Register lan900 / 191.84.201.0 as the cluster interconnect on all nodes (-global).
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg setif -global lan900/191.84.201.0:cluster_interconnect
# Remove the previous interconnect entry (old interface lan1, same subnet —
# presumably a NIC change to the lan900 aggregate; confirm against `getif` output).
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg delif -global lan1/191.84.201.0
# Register lan901 / 130.84.201.0 as the public network on all nodes.
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg setif -global lan901/130.84.201.0:public
# Remove the previous public entry (old interface lan0, same subnet).
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg delif -global lan0/130.84.201.0
# Re-point each node's VIP at the new address/netmask/interface
# (per the notes below, modifying the VIP must be done as root).
/array_crm01_ora/oracrm/product/10204/crs/bin/srvctl modify nodeapps -n h1011101 -A 130.84.201.1/255.255.255.0/lan901
/array_crm01_ora/oracrm/product/10204/crs/bin/srvctl modify nodeapps -n h1011102 -A 130.84.201.3/255.255.255.0/lan901
--------------------------------------------------------------------------------------------------------------------------
# Same network change for the BILL cluster.
# CRS_HOME = /array_bill01_ora/orabill/product/10204/crs

# List OS network interfaces, then the OCR-registered ones.
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg iflist
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg getif
# New interconnect: lan900 / 191.84.201.0 (cluster-wide), then drop the old lan1 entry.
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg setif -global lan900/191.84.201.0:cluster_interconnect
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg delif -global lan1/191.84.201.0
# New public network: lan901 / 130.84.201.0, then drop the old lan2 entry.
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg setif -global lan901/130.84.201.0:public
/array_bill01_ora/orabill/product/10204/crs/bin/oifcfg delif -global lan2/130.84.201.0
# Re-point each node's VIP at the new address/netmask/interface (run as root).
/array_bill01_ora/orabill/product/10204/crs/bin/srvctl modify nodeapps -n h1011103 -A 130.84.201.5/255.255.255.0/lan901
/array_bill01_ora/orabill/product/10204/crs/bin/srvctl modify nodeapps -n h1011104 -A 130.84.201.7/255.255.255.0/lan901
# Restart CRS so the new configuration takes effect
# (presumably on every node, like the `crsctl stop crs` step below — confirm).
/sbin/init.d/init.crs stop
/sbin/init.d/init.crs start
更改rac的vip地址和集群互联地址
主机名 修改前的ip地址 修改后的ip地址 作用
rac1 192.168.0.181 192.168.1.181 rac1 eth0 native ip
rac2 192.168.0.182 192.168.1.182 rac2 eth0 native ip
rac1-vip 192.168.0.191 192.168.1.191 rac1 vip
rac2-vip 192.168.0.192 192.168.1.192 rac2 vip
rac1-priv 10.10.10.181 10.1.0.181 rac1 eth1 ip interconn
rac2-priv 10.10.10.182 10.1.0.182 rac2 eth1 ip interconn
操作过程:停止所有oracle相关的进程, 然后修改操作系统的ip设置, 修改与oracle相关的ip地址的设定, 启动crs及相关服务.
具体操作步骤如下:
1 停止oracle相关的所有进程, 包括数据库, asm, node application, crs本身.
1.1 查看当前系统上crs运行的状态
$ ./crs_stat -t
1.2 关闭服务
srvctl stop service -d bsscrm -s ser_crm01
srvctl stop service -d bsscrm -s ser_crm02
1.3 关闭数据库
srvctl stop database -d bsscrm
1.4 关闭其他应用程序
srvctl stop nodeapps -n h1011101
srvctl stop nodeapps -n h1011102
1.5 关闭crs后台进程, 必须在所有节点上运行.
crsctl stop crs
2 修改操作系统的ip设置(由HP工程师操作)
/etc/hosts文件内容为:(修改前)
130.84.201.2 h1011101
130.84.201.4 h1011102
191.84.201.2 h1011101-priv
191.84.201.4 h1011102-priv
130.84.201.1 h1011101-vip
130.84.201.3 h1011102-vip
修改后: (注: 原文此处缺少修改后的 /etc/hosts 内容, 应列出各主机名对应的新 IP 地址)
3 启动crs, 设置oracle中ip地址相关的设置.
3.1 启动crs, 并关闭随crs启动的应用程序
crsctl start crs
3.2 使用oifcfg修改网卡设置, oifconfig可以被用来设置和查看网卡被oracle使用的方式.
oifcfg iflist会显示当前使用的网卡及其子网设置, 而oifcfg getif -global 则会显示配置文件中的信息.
cd /array_crm01_ora/oracrm/product/10204/crs/bin
A.modify public IP
# ./oifcfg getif -global
eth0 192.168.0.0 global public
eth1 10.10.10.0 global cluster_interconnect
# ./oifcfg setif -global eth0/192.168.1.0:public
# ./oifcfg iflist
eth0 192.168.1.0
eth0 192.168.0.0
eth1 10.1.0.0
# ./oifcfg delif -global eth0/192.168.0.0
# ./oifcfg iflist
eth0 192.168.1.0
eth1 10.1.0.0
# ./oifcfg getif -global
eth0 192.168.1.0 global public
eth1 10.10.10.0 global cluster_interconnect
B. modify private IP (注意: 下一条命令中接口名被误写为 "eth1:" 多了一个冒号, 因而注册了一个多余的 "eth1:" 条目; 随后用正确的接口名重新 setif, 并在下文用 delif 删除该多余条目)
# ./oifcfg setif -global eth1:/10.1.0.0:cluster_interconnect
# ./oifcfg getif -global
eth0 192.168.1.0 global public
eth1 10.10.10.0 global cluster_interconnect
eth1: 10.1.0.0 global cluster_interconnect
# ./oifcfg setif -global eth1/10.1.0.0:cluster_interconnect
# ./oifcfg getif -global
eth0 192.168.1.0 global public
eth1 10.10.10.0 global cluster_interconnect
eth1 10.1.0.0 global cluster_interconnect
eth1: 10.1.0.0 global cluster_interconnect
# ./oifcfg delif -global eth1:
# ./oifcfg delif -global eth1/10.10.10.0
# ./oifcfg getif -global
eth0 192.168.1.0 global public
eth1 10.1.0.0 global cluster_interconnect
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg setif -global lan900/191.84.201.0:cluster_interconnect
/array_crm01_ora/oracrm/product/10204/crs/bin/oifcfg delif -global lan1/191.84.201.0
(注: oifcfg setif 的参数应为 接口/子网地址, 下面两行中的 139.185.142.1/.2 更像主机 IP 而非子网地址, 使用前请核实)
oifcfg setif -node rac1 cms0/139.185.142.1:cluster_interconnect
oifcfg setif -node rac2 cms0/139.185.142.2:cluster_interconnect
C.modify VIP
# ./srvctl modify nodeapps -n rac1 -A 192.168.1.191/255.255.255.0/eth0
# ./srvctl modify nodeapps -n rac2 -A 192.168.1.192/255.255.255.0/eth0
3.4 设置listener.ora和tnsnames.ora, 检查这些文件中是否有指定原来ip的地方, 修改为更改后的ip地址, 在rac1的配置文件中listener.ora包含了 192.168.0.181我修改成了192.168.1.181, rac2上的 listener.ora也做了相应的修改.
3.5 启动node applications, database,service
# Bring node applications, the database, and its services back up.
# NOTE(review): the original notes had `srvctl stop nodeapps` here, which
# contradicts the step title ("start node applications, database, service")
# and would leave the stack down — changed to `start`.
srvctl start nodeapps -n h1011101
srvctl start nodeapps -n h1011102
srvctl start database -d bsscrm
srvctl start service -d bsscrm -s ser_crm01
srvctl start service -d bsscrm -s ser_crm02
3.6 来看看我们的成果:
# ifconfig -a
eth0 Link encap:Ethernet HWaddr 00:0C:29:0D:FE:0F
inet addr:192.168.1.182 Bcast:192.168.1.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:142242 errors:0 dropped:0 overruns:0 frame:0
TX packets:140057 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:83167889 (79.3 MiB) TX bytes:87987399 (83.9 MiB)
Interrupt:19 Base address:0x1480
eth0:1 Link encap:Ethernet HWaddr 00:0C:29:0D:FE:0F
inet addr:192.168.1.192 Bcast:192.168.1.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
Interrupt:19 Base address:0x1480
eth1 Link encap:Ethernet HWaddr 00:0C:29:0D:FE:19
inet addr:10.1.0.182 Bcast:10.1.0.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:29781 errors:0 dropped:0 overruns:0 frame:0
TX packets:26710 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:19667330 (18.7 MiB) TX bytes:11573375 (11.0 MiB)
Interrupt:16 Base address:0x1800
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:21796 errors:0 dropped:0 overruns:0 frame:0
TX packets:21796 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:6238339 (5.9 MiB) TX bytes:6238339 (5.9 MiB)
# ./crs_stat
# su - oracle
oracle@rac2:~$ lsnrctl stat
LSNRCTL for Linux: Version 10.2.0.1.0 - Production on 23-AUG-2006 23:23:47
Copyright (c) 1991, 2005, Oracle. All rights reserved.
Connecting to (ADDRESS=(PROTOCOL=tcp)(HOST=)(PORT=1521))
STATUS of the LISTENER
------------------------
Alias LISTENER_RAC2
Version TNSLSNR for Linux: Version 10.2.0.1.0 - Production
Start Date 23-AUG-2006 22:24:44
Uptime 0 days 0 hr. 59 min. 3 sec
Trace Level off
Security ON: Local OS Authentication
SNMP OFF
Listener Parameter File /u01/app/oracle/product/10.2.0/db_1/network/admin/listener.ora
Listener Log File /u01/app/oracle/product/10.2.0/db_1/network/log/listener_rac2.log
Listening Endpoints Summary...
(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.192)(PORT=1521)))
(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.1.182)(PORT=1521)))
Services Summary...
Service "+ASM" has 1 instance(s).
Instance "+ASM2", status BLOCKED, has 1 handler(s) for this service...
Service "+ASM_XPT" has 1 instance(s).
Instance "+ASM2", status BLOCKED, has 1 handler(s) for this service...
Service "LSExtProc" has 1 instance(s).
Instance "LSExtProc", status UNKNOWN, has 1 handler(s) for this service...
Service "orcl" has 2 instance(s).
Instance "orcl1", status READY, has 1 handler(s) for this service...
Instance "orcl2", status READY, has 2 handler(s) for this service...
Service "orclXDB" has 2 instance(s).
Instance "orcl1", status READY, has 1 handler(s) for this service...
Instance "orcl2", status READY, has 1 handler(s) for this service...
Service "orcl_XPT" has 2 instance(s).
Instance "orcl1", status READY, has 1 handler(s) for this service...
Instance "orcl2", status READY, has 2 handler(s) for this service...
The command completed successfully
以上操作大部分使用root进行. 事实上, 使用srvctl进行的操作可以使用root用户完成, 也可以使用oracle用户完成; 而修改vip则必须使用root用户完成. 使用crs_stat -ls可以查看各个资源的所有者, 属组, 以及相应的权限.
Warning: The above name 'lan901:801' is truncated, use -w to show the output in wide format
# Session environment for the maintenance window.
# Make Oracle tools print full timestamps.
export NLS_DATE_FORMAT="YYYY/MM/DD HH24:MI:SS"
# HP-UX simplified-Chinese locale used on these hosts.
export LANG=zh_CN.hp15CN
# Prompt showing [host]:[user]%: — use $(...) instead of deprecated backticks,
# and quote the assignment so it survives whitespace in either value.
export PS1="[$(hostname)]:[$(whoami)]%:"
# Summary: re-point the interconnect, public network and VIPs for nodes
# nccpxdb1 / nccpxdb2.
# NOTE(review): the original used SQL-style "--" comment markers, which are
# not valid shell comments and would be executed as commands; converted to "#".

# Show OS-level interface/subnet configuration.
oifcfg iflist
# Show the interfaces registered in the cluster (OCR).
oifcfg getif
# Register the new cluster-interconnect subnet on all nodes.
oifcfg setif -global eth1/192.168.1.0:cluster_interconnect
# Remove the old interconnect entry.
# NOTE(review): the subnet here (192.168.1.0) is the NEW one just registered,
# while the interface (lan1) is the old HP-UX name — confirm the correct
# old interface/subnet pair before running.
oifcfg delif -global lan1/192.168.1.0
# Register the new public subnet.
oifcfg setif -global eth1/192.168.125.0:public
# Remove the old public entry.
oifcfg delif -global eth0/192.168.125.0
# Re-point the node VIPs (per the notes above, must be run as root).
srvctl modify nodeapps -n nccpxdb1 -A 192.168.125.1/255.255.255.0/eth1
srvctl modify nodeapps -n nccpxdb2 -A 192.168.125.3/255.255.255.0/eth1