单实例迁移到单节点的RAC

1.背景

本地有一个单实例,现在需要将该单实例变为RAC,目前先只创建一个节点。

 

====》准备:

1.添加一个网卡

2.添加硬盘

3.调整内存

 

2.该节点环境变量调整

----因为已经有了oracle相关用户组,所以,就不创建oracle相关用户组,但是后面要授权一下oracleasm组权限

#1. Disable the firewall
# Stop the IPv4 and IPv6 firewalls now and keep them disabled across
# reboots, as required before installing Grid Infrastructure.
for svc in iptables ip6tables; do
  service "$svc" stop
  chkconfig "$svc" off
done

#2. Create the ASM-related OS groups and the grid user
# (the oracle user and its groups already exist from the single-instance install)

groupadd -g 1020 asmadmin

groupadd -g 1021 asmdba

groupadd -g 1022 asmoper

useradd -u 1001 -g oinstall -G asmadmin,asmdba,asmoper grid

#useradd -u 1001 -g oinstall -G dba,oper,asmdba oracle

## NOTE: make sure the uid does not clash with an existing user
## (the commented-out oracle useradd above reuses uid 1001, same as grid)

#4. Create the required directories (grid base and grid home)

mkdir -p /u01/app/grid

mkdir -p /u01/app/11.2.0/grid

# Give grid ownership of the whole tree first, then hand the oracle
# subtree back to oracle — this order matters.
chown -R grid:oinstall /u01

# "oracle." = user oracle with his login group; assumes /u01/app/oracle
# already exists from the prior single-instance install — TODO confirm
chown -R oracle. /u01/app/oracle/

chmod -R 775 /u01

#5.配置node1 grid环境变量

 

#7.配置limits资源限制

 

#8.配置内核参数

 

#9. Append RAC name resolution to /etc/hosts (public, private and VIP)
# NOTE(review): the public IP here is 192.168.11.111, but the DNS zone
# below maps node1 to 192.168.11.11 (listed here as the VIP) — verify
# which address is intended before installing grid.

echo "

#node1

192.168.11.111           node1.test.com          node1           #public ip

10.10.10.11             node1-priv.test.com     node1-priv      #private ip

192.168.11.11          node1-vip.test.com      node1-vip       #node1 vip

#node2

" >> /etc/hosts

 

#10.yum安装包

 

#11.关闭selinux

 

#12. Resize /dev/shm (tmpfs) — ASM/SGA needs more shared memory
# Remove any existing tmpfs entry, then append a fresh one.
sed -i '/tmpfs/d' /etc/fstab

# FIX: the original echo split the entry across two lines ("... 0" / "0"),
# which writes an invalid /etc/fstab record; the entry must be one line.
echo "tmpfs                     /dev/shm                tmpfs   defaults,size=4G        0 0" >>/etc/fstab

# Apply the new size without a reboot.
mount -o remount /dev/shm

 

#14. Configure bind: listen on all interfaces, answer all clients,
# and disable DNSSEC (lab setup, not production-hardened).
# FIX: dots are escaped so the patterns match the literal loopback IPs
# rather than "any character" (e.g. unescaped 127.0.0.1 would also
# match 127a0b0c1).
sed -i 's/127\.0\.0\.1/any/' /etc/named.conf

sed -i 's/::1/any/' /etc/named.conf

sed -i 's/localhost;/any;/' /etc/named.conf

sed -i 's/dnssec-enable yes/dnssec-enable no/' /etc/named.conf

sed -i 's/dnssec-validation yes/dnssec-validation no/' /etc/named.conf

 

echo "

zone \"test.com\" IN {

        type master;

        file \"test.com.hosts\";

};

zone \"11.168.192.in-addr.arpa\" IN {

        type master;

        file \"192.168.11.rev\";

};

" >> /etc/named.rfc1912.zones

 

echo "\$TTL 1D

@               IN      SOA node1.test.com. root.node1.test.com.(

                          2015111001

                          3h

                          1h

                          1w

                          1h )

                IN      NS      node1.test.com.

node1           IN      A       192.168.11.11

scan            IN      A       192.168.11.101

scan            IN      A       192.168.11.102

scan            IN      A       192.168.11.103

" > /var/named/test.com.hosts

 

echo "\$TTL 1D

@               IN      SOA node1.test.com. root.node1.test.com.(

                          1

                          3h

                          1h

                          1w

                          1h )

                IN      NS      node1.test.com.

1               IN      PTR     node1.test.com.

101             IN      PTR     scan.test.com.

102             IN      PTR     scan.test.com.

103             IN      PTR     scan.test.com.

" > /var/named/192.168.11.rev

 

# Restart DNS and enable it for runlevels 3 and 5.
service named restart

chkconfig --level 35 named on

# Disable NTP and move its config aside: with no /etc/ntp.conf present,
# Oracle Clusterware's CTSS takes over cluster time synchronization.
service ntpd stop

mv /etc/ntp.conf /etc/ntp.conf.bak

# Finally, add oracle to the ASM groups so it can access ASM storage.
usermod -g oinstall -G dba,oper,asmdba,asmadmin oracle

 

3.配置好磁盘及网络

#最后,记得配置好网络,还有磁盘。然后就可以开始安装grid咯!还有主机名也记得改好

hostname node1.test.com

vim /etc/sysconfig/network

 

----另外hosts文件要配置正确,不然安装会报错的。   Vip ip priv都要正确。   还有dns也需要配置好

 

# fdisk -l

 

# fdisk /dev/sdb

n → p → 1 → +1G   （重复三次,创建3个1G主分区,用于存放OCR和votedisk）

p                 （打印分区表确认）

n → e → 4         （创建扩展分区）

n → +20G          （在扩展分区内创建逻辑分区,重复两次）

p

/dev/sdb1               1         132     1060258+  83  Linux

/dev/sdb2             133         264     1060290   83  Linux

/dev/sdb3             265         396     1060290   83  Linux

/dev/sdb4             397        6527    49247257+   5  Extended

/dev/sdb5             397        3008    20980858+  83  Linux

/dev/sdb6            3009        5620    20980858+  83  Linux

W

 

# cat /proc/partitions

 

#rac1: initialize the disk partitions as raw devices.
# The lines below are the CONTENT of /etc/udev/rules.d/60-raw.rules:
# each partition is bound to a raw device on hot-add, and raw1-5 are
# owned by grid:asmadmin mode 0660 so the ASM instance can open them.
vi /etc/udev/rules.d/60-raw.rules

ACTION=="add", KERNEL=="sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"

ACTION=="add", KERNEL=="sdb2", RUN+="/bin/raw /dev/raw/raw2 %N"

ACTION=="add", KERNEL=="sdb3", RUN+="/bin/raw /dev/raw/raw3 %N"

ACTION=="add", KERNEL=="sdb5", RUN+="/bin/raw /dev/raw/raw4 %N"

ACTION=="add", KERNEL=="sdb6", RUN+="/bin/raw /dev/raw/raw5 %N"

KERNEL=="raw[1-5]", MODE="0660", GROUP="asmadmin", OWNER="grid"

 

#重新启动udev

start_udev

 

[root@xuan1 ~]# ll /dev/raw/

total 0

crw-rw---- 1 grid asmadmin 162, 1 Jul 18 17:35 raw1

crw-rw---- 1 grid asmadmin 162, 2 Jul 18 17:35 raw2

crw-rw---- 1 grid asmadmin 162, 3 Jul 18 17:35 raw3

crw-rw---- 1 grid asmadmin 162, 4 Jul 18 17:35 raw4

crw-rw---- 1 grid asmadmin 162, 5 Jul 18 17:35 raw5

crw-rw---- 1 root disk     162, 0 Jul 18 17:35 rawctl

 

#如果有第二个节点,没有看到分区,先同步一下分区,然后再绑定raw

# partprobe /dev/sdb

 

                                                              

 

4.开始安装grid

 

----如果有本地数据库,注意端口号不要重复咯

 

 

 

 

----

# yum -y install /install/grid/rpm/cvuqdisk-1.0.9-1.rpm

service ntpd stop         

mv /etc/ntp.conf /etc/ntp.conf.bak

 

 

===============

/u01/app/11.2.0/grid/root.sh

 

[grid@node1 grid]$ crs_stat -t

Name           Type           Target    State     Host       

------------------------------------------------------------

ora....ER.lsnr ora....er.type ONLINE    ONLINE    node1      

ora....N1.lsnr ora....er.type ONLINE    ONLINE    node1      

ora....N2.lsnr ora....er.type ONLINE    ONLINE    node1      

ora....N3.lsnr ora....er.type ONLINE    ONLINE    node1      

ora.OCR.dg     ora....up.type ONLINE    ONLINE    node1      

ora.asm        ora.asm.type   ONLINE    ONLINE    node1      

ora.cvu        ora.cvu.type   ONLINE    ONLINE    node1      

ora.gsd        ora.gsd.type   OFFLINE   OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE    node1      

ora....SM1.asm application    ONLINE    ONLINE    node1      

ora....E1.lsnr application    ONLINE    ONLINE    node1      

ora.node1.gsd  application    OFFLINE   OFFLINE              

ora.node1.ons  application    ONLINE    ONLINE    node1      

ora.node1.vip  ora....t1.type ONLINE    ONLINE    node1      

ora.oc4j       ora.oc4j.type  ONLINE    ONLINE    node1      

ora.ons        ora.ons.type   ONLINE    ONLINE    node1      

ora....ry.acfs ora....fs.type ONLINE    ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE    node1      

ora.scan2.vip  ora....ip.type ONLINE    ONLINE    node1      

ora.scan3.vip  ora....ip.type ONLINE    ONLINE    node1 

 

----备份ocr

[root@node1 ~]# /u01/app/11.2.0/grid/bin/ocrconfig -export /home/oracle/ocr.bak

 

 

5.开始安装database集群包

 

----后面迁移到rac后,环境变量需要修改的

 

/u01/app/oracle/product/11.2.0/db_rac/root.sh

 

6.创建ASM存储

[grid@node1 ~]$ asmca&

 

7.准备备份oracle

----备份重于一切,重要操作先备份

  

 

 

8.先将数据库迁移到ASM

----集群的启停:

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crsctl stop cluster -all  

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crsctl start cluster -all  

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crsctl stat res -t

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crs_stat -t

 

SQL> select instance_name,status from v$instance;

INSTANCE_NAME                    STATUS

-------------------------------- ------------------------

+ASM1                            STARTED

SQL> col name for a35

SQL>  set lines 120

SQL> select name,state from v$asm_diskgroup;

NAME                                STATE

----------------------------------- ----------------------

DATA                                MOUNTED

FRD                                 MOUNTED

OCR                                 MOUNTED

 

------迁移数据到asm

SQL> select instance_name,status from v$instance;

INSTANCE_NAME    STATUS

---------------- ------------

orcl             OPEN

 

----迁移控制文件和数据文件到ASM

----修改参数文件

SQL> alter system set control_files='+DATA' scope=spfile;

SQL> alter system set db_create_file_dest='+DATA' scope=spfile;

SQL> shut immediate

[oracle@node1 ~]$ lsnrctl stop

 

RMAN> startup nomount

RMAN> restore controlfile from '/u01/app/oracle/oradata/orcl/control01.ctl';

Starting restore at 2016-07-18 02:10:16

using target database control file instead of recovery catalog

allocated channel: ORA_DISK_1

channel ORA_DISK_1: SID=70 device type=DISK

channel ORA_DISK_1: copied control file copy

output file name=+DATA/orcl/controlfile/current.256.917489417

Finished restore at 2016-07-18 02:10:17

 

RMAN> alter database mount;

RMAN> run{backup as copy database format '+DATA';}

RMAN> switch database to copy;

datafile 1 switched to datafile copy "+DATA/orcl/datafile/system.257.917489469"

datafile 2 switched to datafile copy "+DATA/orcl/datafile/sysaux.258.917489503"

datafile 3 switched to datafile copy "+DATA/orcl/datafile/undotbs1.260.917489543"

datafile 4 switched to datafile copy "+DATA/orcl/datafile/users.262.917489555"

datafile 5 switched to datafile copy "+DATA/orcl/datafile/example.259.917489529"

 

RMAN> recover database;

RMAN> alter database open;

----如果没有开归档,需要resetlogs

 

Connected to:

Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production

With the Partitioning, Automatic Storage Management, OLAP, Data Mining

and Real Application Testing options

SQL> select name from v$datafile;

NAME

--------------------------------------------------------------------------------

+DATA/orcl/datafile/system.257.917489469

+DATA/orcl/datafile/sysaux.258.917489503

+DATA/orcl/datafile/undotbs1.260.917489543

+DATA/orcl/datafile/users.262.917489555

+DATA/orcl/datafile/example.259.917489529

SQL> select name from v$controlfile;

NAME

--------------------------------------------------------------------------------

+DATA/orcl/controlfile/current.256.917489417

 

 

----迁移temp和redo

SQL> select member from v$logfile;

MEMBER

--------------------------------------------------------------------------------

/u01/app/oracle/oradata/orcl/redo03.log

/u01/app/oracle/oradata/orcl/redo02.log

/u01/app/oracle/oradata/orcl/redo01.log

 

SQL> select bytes/1024/1024 from v$log;

BYTES/1024/1024

---------------

             50

             50

             50

 

SQL> alter database add logfile '+DATA' size 50m;

Database altered.

SQL> alter database add logfile '+DATA' size 50m;

Database altered.

SQL> alter database add logfile '+DATA' size 50m;

Database altered.

 

SQL> col member for a55

SQL> set lines 120

SQL> select group#,member from v$logfile;

    GROUP# MEMBER

---------- -------------------------------------------------------

         3 /u01/app/oracle/oradata/orcl/redo03.log

         2 /u01/app/oracle/oradata/orcl/redo02.log

         1 /u01/app/oracle/oradata/orcl/redo01.log

         4 +DATA/orcl/onlinelog/group_4.264.917487669

         5 +DATA/orcl/onlinelog/group_5.265.917487677

         6 +DATA/orcl/onlinelog/group_6.266.917487679

6 rows selected

 

SQL> select group#,sequence#,status,archived from v$log;

    GROUP#  SEQUENCE# STATUS           ARC

---------- ---------- ---------------- ---

         1          1 CURRENT          NO

         2          0 UNUSED           YES

         3          0 UNUSED           YES

         4          0 UNUSED           YES

         5          0 UNUSED           YES

         6          0 UNUSED           YES

SQL> alter system switch logfile;

System altered.

SQL> alter system switch logfile;

System altered.

SQL> alter system switch logfile;

System altered.

SQL> alter system checkpoint;

SQL> alter database drop logfile group 1;

SQL> alter database drop logfile group 2;

SQL> alter database drop logfile group 3;

 

SQL> select group#,member from v$logfile;

    GROUP# MEMBER

---------- -------------------------------------------------------

         4 +DATA/orcl/onlinelog/group_4.264.917487669

         5 +DATA/orcl/onlinelog/group_5.265.917487677

         6 +DATA/orcl/onlinelog/group_6.266.917487679

 

 

SQL> select name from v$tempfile;

NAME

------------------------------------------------------------------------------------------------------------------------

/u01/app/oracle/oradata/orcl/temp01.dbf

 

SQL> alter tablespace temp add tempfile '+DATA' size 10m autoextend on maxsize 10g;

SQL> alter tablespace temp drop tempfile '/u01/app/oracle/oradata/orcl/temp01.dbf';

 

SQL> select name,status from v$tempfile;

NAME                                                    STATUS

------------------------------------------------------- -------

+DATA/orcl/tempfile/temp.267.917488005                  ONLINE

 

 

----迁移闪回区和spfile

SQL> alter system set db_recovery_file_dest='+DATA';

SQL> show parameter pfile

NAME                                 TYPE        VALUE

------------------------------------ ----------- ------------------------------

spfile                               string      /u01/app/oracle/product/11.2.0

                                                 /db_1/dbs/spfileorcl.ora

SQL> create pfile='/home/oracle/pfile.ora' from spfile;

SQL> shut immediate

SQL> startup nomount pfile='/home/oracle/pfile.ora';

SQL> create spfile='+DATA' from pfile='/home/oracle/pfile.ora';

SQL> shut abort

 

[oracle@node1 ~]$ cd $ORACLE_HOME/dbs

[oracle@node1 dbs]$ mv spfileorcl.ora spfileorcl.ora.bak

ASMCMD> ls

spfile.271.917494905

ASMCMD> pwd

+data/orcl/PARAMETERFILE

[oracle@node1 dbs]$ cat initorcl.ora

spfile='+data/orcl/PARAMETERFILE/spfile.271.917494905'

 

SQL> startup

SQL> show parameter pfile

NAME                                 TYPE        VALUE

------------------------------------ ----------- ------------------------------

spfile                               string      +DATA/orcl/parameterfile/spfil

                                                 e.271.917494905

 

 

9.更换家目录到RAC目录

----之前安装数据库集群包时选择的家目录为/u01/app/oracle/product/11.2.0/db_rac,现在就切换下目录来启动数据库

SQL> shut immediate

 

[oracle@node1 ~]$ cd $ORACLE_HOME/dbs

[oracle@node1 dbs]$ pwd

/u01/app/oracle/product/11.2.0/db_1/dbs

[oracle@node1 dbs]$ cp initorcl.ora /u01/app/oracle/product/11.2.0/db_rac/dbs/.

 

[oracle@node1 dbs]$ export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_rac

[oracle@node1 ~]$ sqlplus  / as sysdba

SQL*Plus: Release 11.2.0.4.0 Production on Mon Jul 18 03:51:57 2016

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

Connected to:

Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production

With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,

Data Mining and Real Application Testing options

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         1 orcl             orcl             OPEN

 

10.设置实例1

Reference   http://blog.chinaunix.net/uid-23284114

----redo

SQL> select group#,thread# from v$log;

    GROUP#    THREAD#

---------- ----------

         4          1

         5          1

         6          1

----设置新的实例准备参数:

alter system set instance_number=1 scope=spfile sid='orcl1';

alter system set thread=1 scope=spfile sid='orcl1';

alter system set undo_tablespace=undotbs1 scope=spfile sid='orcl1';

 

----为新的实例启用日志进程:

SQL> alter database enable thread 1;

SQL> shut immediate

 

----为新实例准备pfile

[oracle@node1 dbs]$cd $ORACLE_HOME/dbs/

[oracle@node1 dbs]$ mv initorcl.ora initorcl1.ora

 

----设置环境变量,准备启动新实例

export ORACLE_SID=orcl1

export ORACLE_UNQNAME=orcl

 

SQL> startup

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         1 orcl1            orcl1            OPEN

 

----将数据库转换成rac模式,增加rac所需要的表空间、日志文件、初始化参数

alter system set cluster_database=true scope=spfile;

 

SQL> shut immediate

SQL> startup

 

[oracle@node1 ~]$ srvctl add database -d orcl -o /u01/app/oracle/product/11.2.0/db_rac/ -p +DATA/orcl/parameterfile/spfile.271.917494905

# -d db_unique_name;  -o oracle_home;  -p spfile_path

 

[oracle@node1 ~]$ srvctl add instance -d orcl -i orcl1 -n node1

#-i instance_name;  -n node_name

 

----虽然添加了数据库,但是状态错误

[oracle@node1 ~]$ srvctl status database -d orcl

Instance orcl1 is not running on node node1

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crsctl stat res -t

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crs_stat -t

 

----注册一下,因为前面是用db启动的

[oracle@node1 ~]$ srvctl start database -d orcl

[oracle@node1 ~]$ srvctl status database -d orcl

Instance orcl1 is running on node node1

[root@node1 ~]# /u01/app/11.2.0/grid/bin/crs_stat -t |grep orcl

ora.orcl.db    ora....se.type ONLINE    ONLINE    node1

 

----下面设置监听,及远程登录

[oracle@node1 ~]$ orapwd file=$ORACLE_HOME/dbs/orapworcl1 password=sys

SQL> show parameter listen

NAME                                 TYPE        VALUE

------------------------------------ ----------- ------------------------------

listener_networks                    string

local_listener                       string       (ADDRESS=(PROTOCOL=TCP)(HOST=

                                                 192.168.11.11)(PORT=1522))

remote_listener                      string

----监听已经自动注册到1522端口,但是没有注册到scan监听,下面设置下

 SQL> alter system set remote_listener='scan.test.com:1522';

 

----准备tns,注意端口号

[oracle@node1 ~]$ cat $ORACLE_HOME/network/admin/tnsnames.ora

zyx_rac =

  (DESCRIPTION =

    (ADDRESS_LIST =

      (ADDRESS = (PROTOCOL = TCP)(HOST = scan.test.com)(PORT = 1522))

    )

    (CONNECT_DATA =

      (SERVICE_NAME = orcl)

    )

  )

[grid@node1 ~]$ lsnrctl status

[grid@node1 ~]$ lsnrctl stop     ---重启下监听

[grid@node1 ~]$ lsnrctl start

[grid@node1 ~]$ cat /u01/app/11.2.0/grid/network/admin/listener.ora

[grid@node1 ~]$ cat /u01/app/11.2.0/grid/network/admin/endpoints_listener.ora

[root@node1 ~]# ps -ef|grep tnslsnr

 

[grid@node1 ~]$ lsnrctl status |grep orcl

Service "orcl" has 1 instance(s).

  Instance "orcl1", status READY, has 1 handler(s) for this service...

Service "orclXDB" has 1 instance(s).

  Instance "orcl1", status READY, has 1 handler(s) for this service...

 

--------

[oracle@node1 dbs]$ sqlplus  sys/sys@zyx_rac as sysdba

SQL*Plus: Release 11.2.0.4.0 Production on Mon Jul 18 02:15:00 2016

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

Connected to:

Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production

With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,

Data Mining and Real Application Testing options

 

11.添加实例2  ---同一台主机上添加

----目前RAC中只有一个实例,如下

SQL>  select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         1 orcl1            orcl1            OPEN

 

===============》下面开始准备第二实例

create undo tablespace undotbs2 datafile '+DATA/' size 50m;

 

alter database add logfile thread 2 '+DATA/' size 50m;

alter database add logfile thread 2 '+DATA/' size 50m;

alter database add logfile thread 2 '+DATA/' size 50m;

 

在老节点为新的实例准备参数:

alter system set instance_number=2 scope=spfile sid='orcl2';

alter system set thread=2 scope=spfile sid='orcl2';

alter system set undo_tablespace=undotbs2 scope=spfile sid='orcl2';

 

在老节点为新的实例启用日志进程:

alter database enable thread 2;

 

设置集群可以启动的最大实例数# 在启动好新添加的实例后,需要重启下其他节点,以保证参数一致性,最好重启集群实例吧

alter system set cluster_database_instances=2 scope=spfile;

 

 

为新实例准备pfile

[oracle@node1 ~]$ cd $ORACLE_HOME/dbs

[oracle@node1 dbs]$ cp initorcl1.ora initorcl2.ora

 

准备启动新实例

[oracle@node1 dbs]$ export ORACLE_SID=orcl2

SQL> startup

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         2 orcl2            orcl2            OPEN

         1 orcl1            orcl1            OPEN

 

=====》这里重启一下实例1,让参数一致

srvctl stop database -d orcl    ----因为orcl2没有注册到crs中,所以不会停止orcl2

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         2 orcl2            orcl2            OPEN

srvctl start database -d orcl 

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         1 orcl1            orcl1            STARTED

         2 orcl2            orcl2            OPEN

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         2 orcl2            orcl2            OPEN

         1 orcl1            orcl1            OPEN

 

----检查一下参数看和orcl2是否一致

SQL> show parameter clus

 

----准备一下远程登录文件

[oracle@node1 dbs]$ cp orapworcl1 orapworcl2

 

[oracle@node1 dbs]$ sqlplus sys/sys@zyx_rac as sysdba

SQL> select * from gv$mystat where rownum=1;

   INST_ID        SID STATISTIC#      VALUE

---------- ---------- ---------- ----------

         1         73          0          0

[oracle@node1 ~]$ sqlplus sys/sys@zyx_rac as sysdba

SQL> select * from gv$mystat where rownum=1;

   INST_ID        SID STATISTIC#      VALUE

---------- ---------- ---------- ----------

         2        197          0          0

 

====因为实例在同一台主机上,所以这里无法将实例2添加到node1

[oracle@node1 dbs]$ srvctl add instance -d orcl -i orcl2 -n node1

PRCD-1051 : Failed to add instance to database orcl

PRCD-1023 : An instance has already been added to node node1 for database orcl

 

 

 

 

 

 

Reference    http://wenku.baidu.com/link

 

12.开启RAC归档

-----注意关、启顺序,最后关闭的先启动。

----当然,如果实例也也注册到crs中,就不用这样单独启动关闭咯,直接用srvctl去启停数据库

SQL> select instance_name from v$instance;

INSTANCE_NAME

----------------

orcl1

----这里注意,一定要在asm中创建该目录,不然写归档会报找不到路径导致数据库hang

SQL> alter system set log_archive_dest_1='location=+data/orcl/arch' scope=spfile sid='*';

System altered.

SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down.

 

SQL> select instance_name from v$instance;

INSTANCE_NAME

----------------

orcl2

SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down.

SQL> startup mount

SQL> archive log list

Database log mode              No Archive Mode

Automatic archival             Disabled

Archive destination            +DATA/orcl/arch

Oldest online log sequence     1

Current log sequence           3

SQL> alter database archivelog;

SQL> alter database open;

SQL> archive log list

Database log mode              Archive Mode

Automatic archival             Enabled

Archive destination            +DATA/orcl/arch

Oldest online log sequence     1

Next log sequence to archive   3

Current log sequence           3

 

----另一个节点一定不要用远程连接的方式连接,会连接到已经启动的实例的,可以看到归档已经开启咯

[oracle@node1 ~]$ sqlplus / as sysdba

SQL> startup mount

SQL> archive log list

Database log mode              Archive Mode

Automatic archival             Enabled

Archive destination            +DATA/orcl/arch

Oldest online log sequence     7

Next log sequence to archive   9

Current log sequence           9

SQL> alter database open;

SQL> select instance_name from v$instance;

INSTANCE_NAME

----------------

orcl1

SQL> select inst_id,instance_name,instance_name,status from gv$instance;

   INST_ID INSTANCE_NAME    INSTANCE_NAME    STATUS

---------- ---------------- ---------------- ------------

         1 orcl1            orcl1            OPEN

         2 orcl2            orcl2            OPEN

 

----多次切换日志后

SQL> alter system switch logfile;

ASMCMD> pwd

+data/orcl/arch

ASMCMD> ls

1_10_917486928.dbf

2_3_917486928.dbf

2_4_917486928.dbf

2_5_917486928.dbf

 

13.配置OEM

[grid@node1 ~]$ olsnodes -c

node-cluster

 

[oracle@node1 ~]$ emca -config dbcontrol db -repos recreate -cluster

STARTED EMCA at Jul 18, 2016 8:13:26 AM

EM Configuration Assistant, Version 11.2.0.3.0 Production

Copyright (c) 2003, 2011, Oracle.  All rights reserved.

Enter the following information:

Database unique name: orcl

Service name: orcl

Listener port number: 1522

Listener ORACLE_HOME [ /u01/app/11.2.0/grid ]:

Password for SYS user: 

Password for DBSNMP user: 

Password for SYSMAN user: 

Cluster name: node-cluster

Email address for notifications (optional):

Outgoing Mail (SMTP) server for notifications (optional):

ASM ORACLE_HOME [ /u01/app/11.2.0/grid ]:

ASM port [ 1522 ]:

ASM username [ ASMSNMP ]:

ASM user password: 

………

Jul 18, 2016 8:45:51 AM oracle.sysman.emcp.EMDBPostConfig invoke

WARNING:

************************  WARNING  ************************

Management Repository has been placed in secure mode wherein Enterprise Manager data will be encrypted.  The encryption key has been placed in the file: /u01/app/oracle/product/11.2.0/db_rac/node1_orcl/sysman/config/emkey.ora. Ensure this file is backed up as the encrypted data will become unusable if this file is lost.

***********************************************************

Enterprise Manager configuration completed successfully

FINISHED EMCA at Jul 18, 2016 8:45:53 AM

 

[oracle@node1 ~]$ emctl status dbconsole

Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0

Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.

https://node1.test.com:5500/em/console/aboutApplication

Oracle Enterprise Manager 11g is running.

------------------------------------------------------------------

Logs are generated in directory /u01/app/oracle/product/11.2.0/db_rac/node1_orcl/sysman/log

 

 

 

 

请使用浏览器的分享功能分享到微信等