-- Remember that this document is a quick reference, not an exhaustive list of all commands for managing your RAC environment.
-- =======================================================================
-- Cluster Related Commands
-- =======================================================================
crs_stat -t Shows HA resource status (hard to read)
crsstat Output of crs_stat -t formatted nicely
ps -ef|grep d.bin Shows the crsd.bin, evmd.bin and ocssd.bin daemons
crsctl check crs Reports whether CSS, CRS and EVM appear healthy
crsctl stop crs Stops CRS and all other services
crsctl disable crs* Prevents CRS from starting on reboot
crsctl enable crs* Enables CRS start on reboot
crs_stop -all Stops all registered resources
crs_start -all Starts all registered resources
crsctl stop cluster -all Stops the cluster on all nodes
crsctl start cluster -all Starts the cluster on all nodes
* These commands update the file /etc/oracle/scls_scr/[node]/root/crsstart which contains the string “enable” or “disable” as appropriate.
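To verify the current setting on a node (a quick sanity check, assuming the default file location given above):
cat /etc/oracle/scls_scr/[node]/root/crsstart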
-- =======================================================================
-- Database Related Commands
-- =======================================================================
srvctl start instance -d [db_name] -i [inst_name] Starts an instance
srvctl start database -d [db_name] Starts all instances
srvctl stop database -d [db_name] Stops all instances, closes database
srvctl stop instance -d [db_name] -i [inst_name] Stops an instance
srvctl start service -d [db_name] -s [service_name] Starts a service
srvctl stop service -d [db_name] -s [service_name] Stops a service
srvctl status service -d [db_name] Checks status of a service
srvctl status instance -d [db_name] -i [inst_name] Checks an individual instance
srvctl status database -d [db_name] Checks status of all instances
srvctl start nodeapps -n [node_name] Starts gsd, vip, listener, and ons
srvctl stop nodeapps -n [node_name] Stops gsd, vip, listener, and ons
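A worked example against the two-node database devdb used later in this document (instances devdb1 and devdb2), bouncing one instance while the other stays up:
srvctl stop instance -d devdb -i devdb2
srvctl status database -d devdb
srvctl start instance -d devdb -i devdb2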
-- =======================================================================
-- Basic RAC Management Commands
-- =======================================================================
ps -ef|grep d.bin Verifies that the CRS background processes are actually running
crs_stat -t Shows the status of each registered resource in the cluster
crsstat Shows the output of crs_stat -t formatted nicely
crsctl check crs Verifies that the above background daemons are functioning
crsctl enable crs Run as root. Enables automatic restart of CRS on reboot
crsctl disable crs Run as root. Prevents CRS from starting on a reboot. Note that the command returns no output
crsctl stop crs Run as root. Stops all HA resources on the local node, then stops the background daemons mentioned above
crsctl start crs Run as root. Starts CRS and all HA resources on the local node
crs_stop -all Stops all registered resources on all nodes (including all services in the cluster) while leaving CRS itself running
srvctl start nodeapps -n [node] Starts the nodeapps on a node; replace start with stop to shut them down
srvctl start asm -n [node] Starts the ASM instance on a node
srvctl start instance -d [db_name] -i [inst_name] Starts a database instance
srvctl start service -d [db_name] -s [service_name] Starts a database service
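For example, to bounce a single node end to end (using node rac1 and database devdb from the examples later in this document), stop from the database layer down and start in the reverse order:
srvctl stop instance -d devdb -i devdb1
srvctl stop asm -n rac1
srvctl stop nodeapps -n rac1
srvctl start nodeapps -n rac1
srvctl start asm -n rac1
srvctl start instance -d devdb -i devdb1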
-- =======================================================================
-- Starting the Oracle RAC Environment
-- =======================================================================
srvctl start nodeapps -n [hostname1]
srvctl start nodeapps -n [hostname2]
srvctl start asm -n [hostname1]
srvctl start asm -n [hostname2]
srvctl start instance -d [db_name] -i [inst_name1]
srvctl start instance -d [db_name] -i [inst_name2]
srvctl start database -d [db_name]
srvctl start service -d [db_name] -s [service_name]
emctl start dbconsole
crs_stat -t
-- =======================================================================
-- Stopping the Oracle RAC Environment
-- =======================================================================
/* The first step is to stop the Oracle instances. Once the instances (and related services) are down, bring down the ASM instances. Finally, shut down the node applications (Virtual IP, GSD, TNS Listener, and ONS). */
emctl stop dbconsole
srvctl stop service -d [db_name] -s [service_name]
srvctl stop instance -d [db_name] -i [inst_name1]
srvctl stop instance -d [db_name] -i [inst_name2]
-- or, to bring all instances down at once --
srvctl stop database -d [db_name] -o immediate
srvctl stop listener -n [hostname1]
srvctl stop listener -n [hostname2]
srvctl stop asm -n [hostname1]
srvctl stop asm -n [hostname2]
srvctl stop nodeapps -n [hostname1]
srvctl stop nodeapps -n [hostname2]
crs_stat -t
-- =======================================================================
-- Grid Control Agent
-- =======================================================================
Set the environment to ‘agent’ using . oraenv
$ORACLE_HOME/bin/emctl start agent
$ORACLE_HOME/bin/emctl stop agent
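To check whether the agent is already running before starting or stopping it (emctl status is part of the same utility):
$ORACLE_HOME/bin/emctl status agent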
-- =======================================================================
-- Verify and Explore the RAC Cluster and RAC Database Environment
-- =======================================================================
-- Check the status of application resources --
crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.devdb.db application ONLINE ONLINE rac1
ora....b1.inst application ONLINE ONLINE rac1
ora....b2.inst application ONLINE ONLINE rac2
ora....SM1.asm application ONLINE ONLINE rac1
ora....C1.lsnr application ONLINE ONLINE rac1
ora.rac1.gsd application ONLINE ONLINE rac1
ora.rac1.ons application ONLINE ONLINE rac1
ora.rac1.vip application ONLINE ONLINE rac1
ora....SM2.asm application ONLINE ONLINE rac2
ora....C2.lsnr application ONLINE ONLINE rac2
ora.rac2.gsd application ONLINE ONLINE rac2
ora.rac2.ons application ONLINE ONLINE rac2
ora.rac2.vip application ONLINE ONLINE rac2
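Note that crs_stat -t truncates long resource names (e.g. ora....b1.inst above); running crs_stat without -t prints each resource with its full name and status:
crs_stat
The full instance resource names here would typically be ora.devdb.devdb1.inst and ora.devdb.devdb2.inst, following the usual ora.[db].[inst].inst convention (shown as an illustration, not taken from this output).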
-- Status of all instances and services --
srvctl status database -d devdb
Instance devdb1 is running on node rac1
Instance devdb2 is running on node rac2
-- Status of a single instance --
srvctl status instance -d devdb -i devdb2
Instance devdb2 is running on node rac2
-- Status of node applications on a particular node --
srvctl status nodeapps -n rac1
VIP is running on node: rac1
GSD is running on node: rac1
Listener is running on node: rac1
ONS daemon is running on node: rac1
srvctl status nodeapps -n rac2
VIP is running on node: rac2
GSD is running on node: rac2
Listener is running on node: rac2
ONS daemon is running on node: rac2
-- Status of an ASM instance --
srvctl status asm -n rac1
ASM instance +ASM1 is running on node rac1.
srvctl status asm -n rac2
ASM instance +ASM2 is running on node rac2.
-- List all configured databases --
srvctl config database
devdb
-- Display configuration for our RAC database --
srvctl config database -d devdb
rac1 devdb1 /u01/app/oracle/product/10.2.0/db_1
rac2 devdb2 /u01/app/oracle/product/10.2.0/db_1
-- Display the configuration for node applications (VIP, GSD, ONS, Listener) --
srvctl config nodeapps -n rac1 -a -g -s -l
VIP exists.: /rac1-vip/192.168.2.31/255.255.255.0/eth0
GSD exists.
ONS daemon exists.
Listener exists.
-- Display the configuration for the ASM instance(s) --
srvctl config asm -n rac1
+ASM1 /u01/app/oracle/product/10.2.0/db_1
-- Check the status of Oracle Clusterware --
[rac1-] crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[rac2-] crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
-- List the RAC instances --
SELECT inst_id, instance_number inst_no, instance_name inst_name, parallel, status, database_status db_status, active_state state, host_name host
FROM gv$instance ORDER BY inst_id;
INST_ID INST_NO INST_NAME PARALLEL STATUS DB_STATUS STATE HOST
__________ __________ ____________ ___________ _________ ____________ _________ ____________
1 1 devdb1 YES OPEN ACTIVE NORMAL rac1
2 2 devdb2 YES OPEN ACTIVE NORMAL rac2
-- Check connectivity --
/* Verify that you are able to connect to the instances and service on each node. */
sqlplus system@devdb1
sqlplus system@devdb2
sqlplus system@devdb
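Once connected, you can confirm which instance the session actually landed on (a quick check against v$instance):
select instance_name, host_name from v$instance;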
-- Check database configuration --
[rac1-]export ORACLE_SID=devdb1
[rac1-]sqlplus / as sysdba
show sga
Total System Global Area 209715200 bytes
Fixed Size 1218556 bytes
Variable Size 104859652 bytes
Database Buffers 100663296 bytes
Redo Buffers 2973696 bytes
-- Check all database files in the disk groups --
select name from v$datafile
union
select member from v$logfile
union
select name from v$controlfile
union
select name from v$tempfile;
NAME
______________________________________________________________
+RECOVERYDEST/devdb/controlfile/current.256.578676737
+RECOVERYDEST/devdb/onlinelog/group_1.257.578676745
+RECOVERYDEST/devdb/onlinelog/group_2.258.578676759
+RECOVERYDEST/devdb/onlinelog/group_3.259.578682963
+RECOVERYDEST/devdb/onlinelog/group_4.260.578682987
+DG1/devdb/controlfile/current.256.578676735
+DG1/devdb/datafile/example.263.578676853
+DG1/devdb/datafile/indx.270.578685723
+DG1/devdb/datafile/sysaux.261.578676829
+DG1/devdb/datafile/system.259.578676767
+DG1/devdb/datafile/undotbs1.260.578676809
+DG1/devdb/datafile/undotbs1.271.578685941
+DG1/devdb/datafile/undotbs2.264.578676867
+DG1/devdb/datafile/undotbs2.272.578685977
+DG1/devdb/datafile/users.265.578676887
+DG1/devdb/datafile/users.269.578685653
+DG1/devdb/onlinelog/group_1.257.578676739
+DG1/devdb/onlinelog/group_2.258.578676753
+DG1/devdb/onlinelog/group_3.266.578682951
+DG1/devdb/onlinelog/group_4.267.578682977
+DG1/devdb/tempfile/temp.262.578676841
select file_name,bytes/1024/1024 from dba_data_files;
FILE_NAME BYTES/1024/1024
_______________________________________________ __________________
+DG1/devdb/datafile/users.259.606468449 5
+DG1/devdb/datafile/sysaux.257.606468447 240
+DG1/devdb/datafile/undotbs1.258.606468449 30
+DG1/devdb/datafile/system.256.606468445 480
+DG1/devdb/datafile/undotbs2.264.606468677 25
select group#, type, member, is_recovery_dest_file from v$logfile order by group#;
GROUP# TYPE MEMBER IS_RECOVERY_DEST_FILE
_________ ________ ___________________________________________________ ________________________
1 ONLINE +RECOVERYDEST/devdb/onlinelog/group_1.257.606468581 YES
1 ONLINE +DG1/devdb/onlinelog/group_1.261.606468575 NO
2 ONLINE +RECOVERYDEST/devdb/onlinelog/group_2.258.606468589 YES
2 ONLINE +DG1/devdb/onlinelog/group_2.262.606468583 NO
3 ONLINE +DG1/devdb/onlinelog/group_3.265.606468865 NO
3 ONLINE +RECOVERYDEST/devdb/onlinelog/group_3.259.606468875 YES
4 ONLINE +DG1/devdb/onlinelog/group_4.266.606468879 NO
4 ONLINE +RECOVERYDEST/devdb/onlinelog/group_4.260.606468887 YES
[rac1-] export ORACLE_SID=+ASM1
[rac1-] sqlplus / as sysdba
SQL> show sga
Total System Global Area 92274688 bytes
Fixed Size 1217884 bytes
Variable Size 65890980 bytes
ASM Cache 25165824 bytes
SQL> show parameter asm_disk
NAME TYPE VALUE
------------------------------ ----------- ------------------------
asm_diskgroups string DG1, RECOVERYDEST
asm_diskstring string
select group_number, name, allocation_unit_size alloc_unit_size, state, type, total_mb, usable_file_mb from v$asm_diskgroup;
GROUP_NUMBER NAME         ALLOC_UNIT_SIZE STATE   TYPE   TOTAL_MB USABLE_FILE_MB
------------ ------------ --------------- ------- ------ -------- --------------
           1 DG1                  1048576 MOUNTED NORMAL     6134           1868
           2 RECOVERYDEST         1048576 MOUNTED EXTERN     2047           1713
select name, path, header_status, total_mb, trunc(bytes_read/1024/1024) read_mb, trunc(bytes_written/1024/1024) write_mb from v$asm_disk;
NAME     PATH               HEADER_STATUS TOTAL_MB READ_MB WRITE_MB
-------- ------------------ ------------- -------- ------- --------
VOL1     ORCL:VOL1          MEMBER            3067     229     1242
VOL2     ORCL:VOL2          MEMBER            3067     164     1242
VOL3     ORCL:VOL3          MEMBER            2047      11      354
-- Check flash recovery area space usage --
select * from v$recovery_file_dest;
NAME          SPACE_LIMIT SPACE_USED SPACE_RECLAIMABLE NUMBER_OF_FILES
_____________ ___________ __________ _________________ _______________
+RECOVERYDEST  1572864000  331366400                 0               7
select * from v$flash_recovery_area_usage;
FILE_TYPE PERCENT_SPACE_USED PERCENT_SPACE_RECLAIMABLE NUMBER_OF_FILES
------------ ------------------ ------------------------- ---------------
CONTROLFILE .97 0 1
ONLINELOG 20 0 6
ARCHIVELOG 0 0 0
BACKUPPIECE 0 0 0
IMAGECOPY 0 0 0
FLASHBACKLOG 0 0 0
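The percentages above are relative to SPACE_LIMIT in v$recovery_file_dest, so the two views can be combined to express usage in MB. A minimal sketch, using only the columns shown above (v$recovery_file_dest returns a single row):
select u.file_type, round(u.percent_space_used/100 * d.space_limit/1024/1024) used_mb
from v$flash_recovery_area_usage u, v$recovery_file_dest d;
For the ONLINELOG row this works out to 20% of 1,572,864,000 bytes, i.e. roughly 300 MB.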