
Wednesday, June 28, 2017

Important Query

Patch:
++++
SELECT
      DISTINCT RPAD(a.bug_number,11)
      || RPAD(e.patch_name,11)
      || RPAD(TRUNC(c.end_date),12)
      || RPAD(b.applied_flag,4)
FROM
      ad_bugs a,
      ad_patch_run_bugs b,
      ad_patch_runs c,
      ad_patch_drivers d ,
      ad_applied_patches e
WHERE
      a.bug_id = b.bug_id AND
      b.patch_run_id = c.patch_run_id AND
      c.patch_driver_id = d.patch_driver_id AND
      d.applied_patch_id = e.applied_patch_id AND
      a.bug_number in ('16278172','16502567') ORDER BY 1 DESC;

export PATH=$PATH:$ORACLE_HOME/bin:$ORACLE_HOME/OPatch

$ORACLE_HOME/OPatch/opatch lsinventory | grep -i ^Patch
$ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i -E 'DATABASE PSU|DATABASE PATCH SET UPDATE'
$ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i "PATCH SET UPDATE"
$ORACLE_HOME/OPatch/opatch lsinventory -invPtrLoc $ORACLE_HOME/oraInst.loc -bugs_fixed | grep -i 'PATCH SET UPDATE'

URL:
++++
select home_url from apps.icx_parameters;

Redo Details:
+++++++++
set lines 200 pages 200
col GROUP# for 99999
col TYPE for a20
col MEMBER for a100
col IS_RECOVERY_DEST_FILE for a25
select * from v$log;
select * from v$logfile order by GROUP#;

Invalid Object:
++++++++++
spool /tmp/md/invalid_obj_after.txt
set pages 10000 lines 150
col owner for a30
col object_name for a50
col object_type for a30
select owner, object_name, object_type,status
from dba_objects
where status like 'INVALID'
order by 1,2,3 asc ;
exit ;

Instance:
+++++++++
set lines 1000
set pages 60
col host_name format a25
select INST_ID,INSTANCE_NAME,HOST_NAME,VERSION,to_char(STARTUP_TIME,'DD-MON-RRRR HH24:MI:SS') "STARTUP_TIME",STATUS,LOGINS from gv$instance order by 1;
select name,open_mode,controlfile_type from gv$database;
select database_role from gv$database;

Node Details:
+++++++++++++

set lines 200
col host_name for a20
col node_name for a20
col database for a10
col concmgr for a9
col forms for a7
col webserver for a11
col admin for a7
select
NODE_NAME,
decode(STATUS,'Y','ACTIVE','INACTIVE') Status,
decode(SUPPORT_CP,'Y', 'YES','NO') ConcMgr,
decode(SUPPORT_FORMS,'Y','YES', 'NO') Forms,
decode(SUPPORT_WEB,'Y','YES', 'NO') WebServer,
decode(SUPPORT_ADMIN, 'Y','YES', 'NO') Admin,
decode(SUPPORT_DB, 'Y','YES', 'NO') Database
from apps.fnd_nodes
where node_name != 'AUTHENTICATION' order by NODE_NAME;
>>>>>
set lines 1000
set pages 60
col host_name format a25
select INST_ID,INSTANCE_NAME,HOST_NAME,VERSION,to_char(STARTUP_TIME,'DD-MON-RRRR HH24:MI:SS') "STARTUP_TIME",STATUS,LOGINS from gv$instance order by 1;
select node_name,to_char(CREATION_DATE,'DD-MON-RRRR HH24:MI:SS') "Created on", to_char(LAST_UPDATE_DATE,'DD-MON-RRRR HH24:MI:SS') "Updated On", SUPPORT_CP "CP", SUPPORT_FORMS "FORMS",SUPPORT_WEB "WEB",SUPPORT_ADMIN "ADMIN",STATUS from apps.fnd_nodes order by 2;
select home_url from apps.icx_parameters;
select release_name from apps.fnd_product_groups;

Password Creation:
+++++++++++++
orapwd file=$ORACLE_HOME/dbs/orapw$ORACLE_SID password=<sys_password> entries=5

tuning related
++++++++++
var v_num number;
exec :v_num := dbms_spm.load_plans_from_cursor_cache(sql_id => '2ym9kd2wcbr77', plan_hash_value => 4127271796);
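Once loaded, the baseline can be verified from the SQL plan baseline views (a quick check; no filter is applied here, so narrow it as needed):

select sql_handle, plan_name, origin, enabled, accepted, created
from dba_sql_plan_baselines
order by created desc;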
==================
select s.INST_ID,s.SID,s.serial#,s.event,s.last_call_et,s.SQL_ID,r.PHASE_CODE,r.STATUS_CODE,r.PARENT_REQUEST_ID,r.ACTUAL_START_DATE,r.ACTUAL_COMPLETION_DATE from gv$session s,apps.fnd_concurrent_requests r where s.audsid=r.oracle_session_id and r.request_id=16624610;

select table_name, stale_stats,last_analyzed from dba_tab_statistics where table_name='&1';
select owner, index_name, last_analyzed, global_stats from dba_indexes where index_name='&1';
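If the statistics turn out to be stale, a typical follow-up is to regather them (a minimal sketch; the owner is a placeholder and &1 is the same table substitution variable used above):

exec dbms_stats.gather_table_stats(ownname => '&owner', tabname => '&1', cascade => TRUE);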

select * from table(dbms_xplan.display_awr('d6xu9cxq57ant'));

Killing sessions in RAC:
SELECT  'ALTER SYSTEM KILL SESSION '||''''||sid||','||serial#||','|| '@' || inst_id ||''''|| ';' from gv$session where osuser = 'xxwwexp' and machine in ('ccixapp-prod1-128','ccixapp-prod2-155');

SELECT  'ALTER SYSTEM KILL SESSION '||''''||sid||','||serial#||','|| '@' || inst_id ||''''|| ';' from gv$session where sql_id in ('az41pugxbqdha','cmndhqbj6h5dj','d6xu9cxq57ant');


=======================
select s.snap_id,to_char(s.begin_interval_time,'DD-MON-YYYY DY HH24:MI') sample_end
, q.sql_id
, q.plan_hash_value
, q.EXECUTIONS_DELTA "executions"
, q.VERSION_COUNT "VersionCount"
, q.INVALIDATIONS_DELTA  "Invalidation"
, round(disk_reads_delta/executions_delta) "Phy IO/Exec"
, round(buffer_gets_delta/executions_delta) "LogIO/Exec"
, round(elapsed_time_delta/1000000/executions_delta,2) "Elapsed_Time_In_Sec/Exec"
, round(cpu_time_delta/1000000/executions_delta,2) "CPU_Time_In_Sec/Exec"
, round(rows_processed_delta/executions_delta) "Rows/Exec"
from dba_hist_sqlstat q, dba_hist_snapshot s
where q.SQL_ID=trim('&sql_id')
and s.snap_id = q.snap_id
and s.dbid = q.dbid
and s.instance_number = q.instance_number
and q.executions_delta > 0
and begin_interval_time BETWEEN (SYSDATE - nvl(to_number('&num_days'),1)) AND SYSDATE
order by q.sql_id,s.snap_id desc;

============
Child request:
set lines 200
col USER_CONCURRENT_PROGRAM_NAME for a40
col PHASE_CODE for a10
col STATUS_CODE for a10
col COMPLETION_TEXT for a20
SELECT sum.request_id,
       req.parent_request_id,
       sum.user_concurrent_program_name,
       DECODE(sum.phase_code, 'C', 'Completed', sum.phase_code) phase_code,
       DECODE(sum.status_code,
              'D', 'Cancelled', 'E', 'Error', 'G', 'Warning', 'H', 'On Hold',
              'T', 'Terminating', 'M', 'No Manager', 'X', 'Terminated',
              'C', 'Normal', sum.status_code) status_code,
       sum.actual_start_date,
       sum.actual_completion_date,
       sum.completion_text
FROM   apps.fnd_conc_req_summary_v sum,
       apps.fnd_concurrent_requests req
WHERE  req.request_id = sum.request_id
AND    req.parent_request_id = '&parent_concurrent_request_id';

============
Manager name:
select b.USER_CONCURRENT_QUEUE_NAME from apps.fnd_concurrent_processes a,
apps.fnd_concurrent_queues_vl b, apps.fnd_concurrent_requests c
where  a.CONCURRENT_QUEUE_ID = b.CONCURRENT_QUEUE_ID
and    a.CONCURRENT_PROCESS_ID = c.controlling_manager
and    c.request_id ='&request_id';

To get Hash Values from SQLID:

SET PAUSE ON
SET PAUSE 'Press Return to Continue'
SET PAGESIZE 60
SET LINESIZE 300

SELECT DISTINCT sql_id, plan_hash_value
FROM dba_hist_sqlstat q,
    (
    SELECT /*+ NO_MERGE */ MIN(snap_id) min_snap, MAX(snap_id) max_snap
    FROM dba_hist_snapshot ss
    WHERE ss.begin_interval_time BETWEEN (SYSDATE - &No_Days) AND SYSDATE
    ) s
WHERE q.snap_id BETWEEN s.min_snap AND s.max_snap
  AND q.sql_id IN ( '&SQLID');

Schema Size:

set linesize 150
set pagesize 5000
col owner for a15

select owner, sum(bytes)/1024/1024/1024 "SIZE_IN_GB" from dba_segments group by owner order by owner;

Find OPP Log File Using Concurrent Request ID:
Select
fcpp.concurrent_request_id req_id,
fcp.node_name,
fcp.logfile_name
FROM
fnd_conc_pp_actions fcpp,
fnd_concurrent_processes fcp
where
fcpp.processor_id = fcp.concurrent_process_id
AND
fcpp.action_type = 6
AND
fcpp.concurrent_request_id ='&1';

To check which Indexes are on a Table:

SELECT index_name
FROM   all_indexes
WHERE  table_name = '&table_name';

To check which Columns are Indexed on a Table:

SELECT column_name, index_name
FROM   all_ind_columns
WHERE  table_name = '&table_name';

(The UNIQUENESS column in ALL_INDEXES shows whether each index is UNIQUE or NONUNIQUE.)

To Check Triggers on a Table:

SELECT trigger_name
FROM   all_triggers
WHERE  table_name = '&table_name';


To check the maximum number of allowed connections to an Oracle database:

SELECT
  'Currently, '
  || (SELECT COUNT(*) FROM V$SESSION)
  || ' out of '
  || VP.VALUE
  || ' connections are used.' AS USAGE_MESSAGE
FROM
  V$PARAMETER VP
WHERE VP.NAME = 'sessions';

Currently, 2311 out of 6024 connections are used.
SQL>

To find a Forms runaway process:

ps -ef|grep 26346

col sid format 999999
col username format a20
col osuser format a15
select b.spid,a.sid, a.serial#,a.username, a.osuser
from v$session a, v$process b
where a.paddr= b.addr
and b.spid='&spid'
order by b.spid;
SQL> SQL> SQL>   2    3    4    5  Enter value for spid: 26346
old   4: and b.spid='&spid'
new   4: and b.spid='26346'

no rows selected


SQL>
===========
DPSERVER_STATUS

set heading off echo off feedback off
SELECT DECODE(Cdr_Exe_Service.getSrvcLocLockStatus(LOCATION_NAME,0) ,1,'Running' ,0,'Down') DPSERVER_STATUS_RUNNING  FROM (select csrvloc.location_name from apps.cdr_service_locations csrvloc , apps.fnd_nodes fnod,apps.cdr_services csrv where upper(csrvloc.machine) like (upper(fnod.node_name)||'%') and csrvloc.SERVICE_LOCATION_ID in csrv.SERVICE_LOCATION_ID) where rownum=1;

select * from (select ':'||csrvloc.machine||':' from apps.cdr_service_locations csrvloc , apps.fnd_nodes fnod, apps.cdr_services csrv where upper(csrvloc.machine) like (upper(fnod.node_name)||'%')and csrvloc.SERVICE_LOCATION_ID in csrv.SERVICE_LOCATION_ID) where rownum=1;

select * from (select csrv.ROOT_DIRECTORY from cdr_services csrv,apps.cdr_service_locations csrvloc, fnd_nodes fnod where csrv.SERVICE_LOCATION_ID=csrvloc.SERVICE_LOCATION_ID and upper(csrvloc.machine) like (upper(fnod.node_name)||'%') and csrv.ROOT_DIRECTORY is not null) where rownum=1;

SAS-------------------------

set heading off echo off feedback off
SELECT DECODE(Cdr_Exe_Service.getSrvcLocLockStatus(LOCATION_NAME,0) ,1,'Running' ,0,'Down') DPSERVER_STATUS_RUNNING  FROM (select csrvloc.location_name from apps.cdr_service_locations csrvloc ,apps.cdr_services csrv where LOCATION_NAME like '%SAS%' and csrvloc.SERVICE_LOCATION_ID in csrv.SERVICE_LOCATION_ID)where rownum=1;

select * from (select ':'||machine||':' from apps.cdr_service_locations where upper(LOCATION_NAME) like '%SAS%') where rownum=1;


select * from (select csrv.ROOT_DIRECTORY from cdr_services csrv,apps.cdr_service_locations csrvloc  where csrv.SERVICE_LOCATION_ID=csrvloc.SERVICE_LOCATION_ID and upper(csrvloc.LOCATION_NAME) like '%SAS%' and csrv.ROOT_DIRECTORY is not null) where rownum=1;

================
-- -----------------------------------------------------------------------------------
-- File Name    : https://oracle-base.com/dba/monitoring/ts_free_space.sql
-- Author       : Tim Hall
-- Description  : Displays a list of tablespaces and their used/full status.
-- Requirements : Access to the DBA views.
-- Call Syntax  : @ts_free_space.sql
-- Last Modified: 13-OCT-2012 - Created. Based on ts_full.sql
--                22-SEP-2017 - LINESIZE set.
-- -----------------------------------------------------------------------------------
Identify Tablespaces with Free Space:

SET PAGESIZE 140 LINESIZE 200
COLUMN used_pct FORMAT A11

SELECT tablespace_name,
       size_mb,
       free_mb,
       max_size_mb,
       max_free_mb,
       TRUNC((max_free_mb/max_size_mb) * 100) AS free_pct,
       RPAD(' '|| RPAD('X',ROUND((max_size_mb-max_free_mb)/max_size_mb*10,0), 'X'),11,'-') AS used_pct
FROM   (
        SELECT a.tablespace_name,
               b.size_mb,
               a.free_mb,
               b.max_size_mb,
               a.free_mb + (b.max_size_mb - b.size_mb) AS max_free_mb
        FROM   (SELECT tablespace_name,
                       TRUNC(SUM(bytes)/1024/1024) AS free_mb
                FROM   dba_free_space
                GROUP BY tablespace_name) a,
               (SELECT tablespace_name,
                       TRUNC(SUM(bytes)/1024/1024) AS size_mb,
                       TRUNC(SUM(GREATEST(bytes,maxbytes))/1024/1024) AS max_size_mb
                FROM   dba_data_files
                GROUP BY tablespace_name) b
        WHERE  a.tablespace_name = b.tablespace_name
       )
ORDER BY tablespace_name;

SET PAGESIZE 14



RAC (Real Application Clusters) Details

RAC:
Allows multiple instances to access a single database.
A cluster is a High Availability feature: it eliminates single points of failure.
11gR2 RAC features:

  • We can store everything on the ASM. We can store OCR & voting files also on the ASM.
  • Single Client Access Name (SCAN) - eliminates the need to change the TNS entry when nodes are added to or removed from the Cluster. RAC instances register with the SCAN listeners as remote listeners. SCAN is a fully qualified domain name. Oracle recommends assigning 3 addresses to SCAN, which creates three SCAN listeners.
  • By default, LOAD_BALANCE is ON.
  • GSD (Global Service Daemon), gsdctl introduced.
  • Grid Naming Service (GNS) is a new service introduced in Oracle RAC 11g R2. With GNS, Oracle Clusterware (CRS) can manage Dynamic Host Configuration Protocol (DHCP) and DNS services for dynamic node registration and configuration.
  • Oracle Local Registry (OLR) - New in Oracle 11gR2 as part of Oracle Clusterware. The OLR is similar to the Oracle Cluster Registry, but it only stores information about the local node. The OLR is not shared by other nodes in the cluster and is used by OHASd while starting or joining the cluster. The OLR stores information that is typically required by OHASd, such as the version of Oracle Clusterware, the configuration, and so on. Oracle stores the location of the OLR in a text file named /etc/oracle/olr.loc, which points to the OLR configuration file $GRID_HOME/cdata/<hostname>.olr.
  • What is OLR and why is it required?
While starting the clusterware, it needs to access the OCR to know which resources it needs to start. However, the OCR file is stored inside ASM, which is not accessible at this point (because the ASM resource itself is also defined in the OCR file).
    To avoid this, the resources which need to be started on the node are stored in an operating-system file called the OLR (Oracle Local Registry). Each node has its own OLR file.
      Where is the OLR stored? When is an OLR backup created?

      By default, the OLR is located at Grid_home/cdata/host_name.olr

      The OLR is backed up after an installation or an upgrade. After that time, you can only manually back up the OLR. Automatic backups are not supported for the OLR.

      If the OLR file is missing, how can you restore it from backup?

      # crsctl stop crs -f
      # touch $GRID_HOME/cdata/<node>.olr
      # chown root:oinstall $GRID_HOME/cdata/<node>.olr
      # ocrconfig -local -restore $GRID_HOME/cdata/<node>/backup_<date>_<num>.olr
      # crsctl start crs

      Someone deleted the OLR file by mistake and no backups are available. What will be the impact and how can you fix it?

      If the OLR is missing and the cluster is already running, the cluster will keep running fine. But if you try to restart it, it will fail.

      So you need to do below activities.

      On the failed node:

      # $GRID_HOME/crs/install/rootcrs.pl -deconfig -force
      # $GRID_HOME/root.sh
      • Multicasting is introduced in 11gR2 for private interconnect traffic.
      • In Oracle 10g RAC and 11gR1 RAC, Oracle Clusterware and ASM are installed in different Oracle homes, and the Clusterware has to be up before the ASM instance can be started, because the ASM instance uses the clusterware to access the shared storage. Oracle 11gR2 introduced the grid infrastructure home, which combines Oracle Clusterware and ASM. The OCR and voting disk of 11gR2 clusterware can be stored in ASM.
      Oracle Clusterware:
      Clusterware is the software layer run by Cluster Ready Services (CRS) using two key components: the voting disk, which records node membership information, and the Oracle Cluster Registry (OCR). Clusterware monitors all components like instances and listeners.

      Voting Disk -
      Voting disks are an important component of Oracle Clusterware. A voting disk is a file that resides on shared storage; its primary function is to manage node membership and prevent split-brain syndrome. The voting disk reassigns cluster ownership between the nodes in case of failure. Each voting disk must be accessible by all nodes in the cluster. If any node stops passing its heartbeat to the other nodes or to the voting disk, that node will be evicted. We must have an odd number of disks; Oracle recommends a minimum of 3 and a maximum of 5. In 10g, Clusterware supports up to 32 voting disks, but 11gR2 supports up to 15.
      In 11g Release 2, voting disk data is automatically backed up in the OCR whenever there is a configuration change. Oracle recommends NOT using the dd command to back up or restore voting disks, as this can lead to their loss.
      Also, the data is automatically restored to any voting disk that is added.
      What Information is stored in VOTING DISK/FILE
      It contains 2 types of data:
      Static data : Info about the nodes in cluster
      Dynamic data: Disk heartbeat logging
      It contains the important details of the cluster nodes membership like
      a.       Which node is part of the cluster.
      b.      Which node is leaving the cluster and
      c.       Which node is joining the cluster.

      What is the purpose of Voting disk?

      Voting disk stores information about the nodes in the cluster and their heartbeat information. Also stores information about cluster membership.

      Why we need voting disk?

      Oracle Clusterware uses the VD to determine which nodes are members of a cluster. Oracle Cluster Synchronization Service daemon (OCSSD) on each cluster node updates the VD with the current status of the node every second. The VD is used to determine which RAC nodes are still in the cluster should the interconnect heartbeat between the RAC nodes fail.

      To find current location of Voting disk:
      [oracle@rsingle ~]$ crsctl query css votedisk

      Voting disk backup  (In 10g)
      dd if=<voting-disk-path> of=<backup/path>

      Add/delete vote disk
      crsctl add css votedisk <path> -adds a new voting disk
      crsctl delete css votedisk <path> -- deletes the voting disk

      OCR:
      OCR is the central repository for CRS; it stores details about the services and the status of the resources. It is a binary file which resides on shared storage and is accessible by all nodes. It is created at the time of Grid installation. It stores information to manage Oracle Clusterware and its components, such as the RAC database, listener, VIP, SCAN IP and services. The OCR contains information such as which database instance runs on which node and which services run on which database. Oracle Clusterware automatically creates OCR backups every 4 hours. At any one time, Oracle Clusterware always retains the latest 3 backup copies of the OCR, which are 4 hours old, 1 day old and 1 week old.

      Oracle stores the location of the OCR file in a text file called ocr.loc, which is located in different places depending on the operating system. For example, on Linux-based systems the ocr.loc file is placed under the /etc/oracle directory, and for UNIX-based systems the ocr.loc is placed in /var/opt/oracle. Windows systems use the registry key Hkey_Local_Machine\software\Oracle\ocr to store the location of the ocr.loc file.

      What is OCR and what does it contain?
      OCR is the central repository for CRS, which stores the metadata, configuration and state information for all cluster resources defined in the clusterware. It is a binary file which resides on shared storage and is accessible by all nodes. It is created at the time of Grid installation.
      • node membership information
      • status of cluster resources like database,instance,listener,services
      • ASM DISKGROUP INFORMATION
      • Information about the OCR, voting disk, their locations and backups.
      • VIP and SCAN VIP details.
      Who updates the OCR and how/when does it get updated?

      The OCR is updated by client applications and utilities through the CRSd process:

      1. Tools like DBCA, DBUA, NETCA, ASMCA, CRSCTL, SRVCTL through the CRSd process.

      2. CSSd during cluster setup.

      3. CSS during node addition/deletion.

      Each node maintains a copy of the OCR in memory. Only one CRSd (the master) performs reads and writes to the OCR file. Whenever some configuration is changed, the CRSd process refreshes the local OCR cache and the remote OCR caches and updates the OCR file on disk.

      So whenever we try to get cluster information using srvctl or crsctl, the local OCR cache is used for fetching the data. But when we modify something, the CRSd process updates the physical OCR file.

      The OCR file has been corrupted and there is no valid backup of the OCR. What will be the action plan?

      In this case, we need to deconfigure and reconfigure.

      Deconfig can be done using the rootcrs.sh -deconfig option,

      and reconfig can be done using the gridsetup.sh script.

      There is no way to customize the backup frequency or the number of files that Oracle Grid Infrastructure retains while automatically backing up the OCR.
      To find the location of the current OCR:
      [oracle@rsingle ~]$ ocrcheck
      ./ocrconfig -manualbackup
      ./ocrconfig -showbackup

      Important daemons:

      The CRS stack has four components that run as daemons or processes:
      OPROCd - Process Monitor Daemon
      CRSd - CRS daemon; the failure of this daemon results in the node being rebooted to avoid data corruption
      OCSSd - Oracle Cluster Synchronization Service Daemon (updates the registry)

      [ocssd, crsd, evmd, oprocd, racgmain, racgimon]

      EVMd - Event Manager Daemon.

      Oracle High Availability Services Daemon (OHASD):
      OHAS is implemented via a new daemon process called ohasd.

      Oracle High Availability Services Daemon (OHASD) anchors the lower part of the Oracle Clusterware stack, which consists of processes that facilitate cluster operations in RAC databases. This includes the GPNPD, GIPC, MDNS and GNS background processes.

      To enable OHAS: crsctl enable crs; this will cause OHAS to autostart when each node reboots. To verify that OHAS is running, check for the CRS-4123 message in your alert log.
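      A quick command-line check of the stack (standard crsctl commands; the CRS-4638 line is the typical success message):

      $ crsctl check has
      CRS-4638: Oracle High Availability Services is online
      $ crsctl check crs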

      CRS:Cluster Ready Services:
      ps -ef | grep -i crs | grep -v grep
      root 25863 1 1 Oct27 ? 11:37:32 /opt/oracle/grid/product/11.2.0/bin/crsd.bin
      crsd.bin:
      This process is responsible for the start, stop, monitoring and failover of resources. It maintains the OCR and restarts resources when failures occur. The daemon is restarted automatically on failure (no node restart needed); it runs as the root user.

      CSS:Cluster Synchronization Service
      $ ps -ef | grep -v grep | grep css
      root 19541 1 0 Oct27 ? 00:05:55 /opt/oracle/grid/product/11.2.0/bin/cssdmonitor
      root 19558 1 0 Oct27 ? 00:05:45 /opt/oracle/grid/product/11.2.0/bin/cssdagent
      oragrid 19576 1 6 Oct27 ? 2-19:13:56 /opt/oracle/grid/product/11.2.0/bin/ocssd.bin

      CSS has three separate processes: the CSS daemon (ocssd), the CSS Agent (cssdagent), and the CSS Monitor (cssdmonitor).
      CSS Monitor (cssdmonitor):
      Monitors node hangs (via oprocd functionality), monitors OCSSD process hangs (via oclsomon functionality) and monitors vendor clusterware (via vmon functionality).

      CSS Agent (cssdagent):
      Spawned by the OHASD process. Previously (10g) this was oprocd, responsible for I/O fencing. Killing this process causes a node reboot. It stops, starts and checks the status of the ocssd.bin daemon.

      CSS daemon (ocssd): ocssd.bin:
      Manages cluster node membership and runs as the oragrid user. Failure of this process results in a node restart.


      EVM: Event Manager:
      A background process that publishes Oracle Clusterware events. It monitors the message flow between the nodes and logs the relevant event information to the log files.

      Diskmon :
      Disk Monitor daemon (diskmon): Monitors and performs input/output fencing for Oracle Exadata Storage Server. As Exadata storage can be added to any Oracle RAC node at any point in time, the diskmon daemon is always started when ocssd is started.

      ONS/eONS:
      ONS is Oracle Notification Service. eONS is a Java Process.

      OPROCD:
      Runs as root and provides node fencing instead of hangcheck timer kernel module

      RACG
      CTSS:
      Cluster time synchronization daemon (ctssd) manages time synchronization between the nodes, rather than depending on NTP.
      Gipcd :
      Grid IPC daemon (gipcd): Is a helper daemon for the communications infrastructure
      Oracle Agent:
      Oracle Root Agent:Orarootagent :
      Is a specialized oraagent process that helps CRSD manage resources owned by root, such as the network, and the Grid virtual IP address
      Oclskd :
      Cluster kill daemon (oclskd): Handles instance/node evictions requests that have been escalated to CSS .
      Oracle High Availability Service:
      Gnsd :
      Oracle Grid Naming Service (GNS): Is a gateway between the cluster mDNS and external DNS servers. The GNS process performs name resolution within the cluster.
      Mdnsd :
      Multicast domain name service (mDNS): Allows DNS requests. The mDNS process is a background process on Linux and UNIX, and a service on Windows.

      Oracle RAC instances are composed of following background processes:
      LMON    — Global Enqueue Service Monitor
      LMD     — Global Enqueue Service Daemon
      LMS     — Global Cache Service Process
      LCK0    — Instance Enqueue Process
      DIAG    — Diagnosability Daemon
      RMSn    — Oracle RAC Management Processes (RMSn)
      RSMN    — Remote Slave Monitor
      DBRM    — Database Resource Manager (from 11g R2)
      PING    — Response Time Agent (from 11g R2)
      ACMS    — Atomic Control file to Memory Service (ACMS)(from Oracle 11g)
      GTX0-j  — Global Transaction Process (from Oracle 11g)

      What is Split Brain?
      In an Oracle RAC environment all the instances/servers communicate with each other using high-speed interconnects on the private network. This private network interface or interconnect is redundant and is only used for inter-instance Oracle data block transfers. With respect to Oracle RAC systems, split brain occurs when the instance members in a RAC fail to ping/connect to each other via this private interconnect, but the servers are all physically up and running and the database instance on each of these servers is also running. These individual nodes are running fine and can conceptually accept user connections and work independently. So basically, due to the lack of communication, each instance thinks that the other instance it cannot reach is down, and it needs to do something about the situation. The problem is that if we leave these instances running, the same block might get read and updated in these individual instances, and there would be data integrity issues, as the blocks changed in one instance will not be locked and could be overwritten by another instance. Oracle has efficiently implemented checks for the split brain syndrome.

      In a cluster, a private interconnect is used by cluster nodes to monitor each node’s status and communicate with each other. When two or more nodes fail to ping or connect to each other via this private interconnect, the cluster gets partitioned into two or more smaller sub-clusters each of which cannot talk to others over the interconnect. Oblivious of the existence of other cluster fragments, each sub-cluster continues to operate independently of the others. This is called “Split Brain”. In such a scenario, integrity of the cluster and its data might be compromised due to uncoordinated writes to shared data by independently operating nodes. Hence, to protect the integrity of the cluster and its data, the split-brain must be resolved.

      **
      Split brain syndrome occurs when the instances in a RAC fail to connect or ping each other via the private interconnect. So, in a two-node situation, both instances will think that the other instance is down because of the lack of connection. The problem which can arise out of this situation is that the same block might get read and updated in these individual instances, causing data integrity issues, because the block changed in one instance will not be locked and could be overwritten by another instance. Both instances start working independently.


      Interconnect -
      The private network that connects all the servers in the cluster.
      The interconnect uses a switch that only the nodes in the cluster can access. Instances in the cluster communicate with each other via the interconnect.

      Global Resource Directory (GRD): It records and stores the current status of data blocks; whenever a block is transferred from a local cache to another instance, the GRD is updated.
      It has two parts:
      1. GCS (Global Cache Service)
      2. GES (Global Enqueue Service)

      Global Cache Service (GCS): Management of data sharing and exchange is done by GCS. It contains information on current locks and on instances waiting to acquire locks on blocks. Implemented by the LMS background process.
      Global Enqueue Service (GES): Handles non-data-block resources and controls dictionary and library cache locks and transactions. Implemented by LMD.

      SCAN: Single Client Access Name
      Oracle RAC 11g Release 2 introduces the Single Client Access Name (SCAN), which provides a single name for clients to access Oracle databases running in a cluster and simplifies the database connection strings that an Oracle client uses to connect. It eliminates the need to change TNSNAMES entries when nodes are added to or removed from the cluster.
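      For example, a client alias can point at the SCAN name instead of at individual node VIPs (a hypothetical tnsnames.ora entry; host and service names are placeholders):

      ORCL =
        (DESCRIPTION =
          (ADDRESS = (PROTOCOL = TCP)(HOST = prod-scan.example.com)(PORT = 1521))
          (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = orcl_svc))
        )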

      Difference between CRSCTL and SRVCTL?

      The crsctl command is used to manage the elements of the clusterware (CRS, CSSD, OCR, voting disk, etc.), while srvctl is used to manage the elements of the cluster (databases, instances, listeners, services, etc.).
      Both commands were introduced with Oracle 10g and have been improved since.
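      A few typical invocations that illustrate the split (standard commands; the database and instance names are placeholders):

      crsctl check crs                       -- health of the clusterware stack
      crsctl query css votedisk              -- voting disk locations
      srvctl status database -d PROD         -- status of all instances of a database
      srvctl stop instance -d PROD -i PROD1  -- stop a single instance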


      What is cache fusion?
      Transferring of data between RAC instances by using private network.Cache Fusion is the remote memory mapping of Oracle buffers, shared between the caches of participating nodes in the cluster. When a block of data is read from datafile by an instance within the cluster and another instance is in need of the same block, it is easy to get the block image from the instance which has the block in its SGA rather than reading from the disk.

      With Cache Fusion, Oracle RAC transfers the data block from the buffer cache of one instance to the buffer cache of another instance using the cluster high-speed interconnect.

      What is FAN?
      Ans:
      Applications can use Fast Application Notification (FAN) to enable rapid failure detection, balancing of connection pools after failures, and re-balancing of connection pools when failed components are repaired. The FAN process uses system events that Oracle publishes when cluster servers become unreachable or if network interfaces fail.

      What is FCF?
      Ans:
      Fast Connection Failover provides high availability to FAN integrated clients, such as clients that use JDBC, OCI, or ODP.NET. If you configure the client to use fast connection failover, then the client automatically subscribes to FAN events and can react to database UP and DOWN events. In response, Oracle gives the client a connection to an active instance that provides the requested database service.

      What is TAF and TAF policies?
      Ans:
      Transparent Application Failover (TAF) - A runtime failover for high availability environments, such as Real Application Clusters and Oracle Real Application Clusters Guard, TAF refers to the failover and re-establishment of application-to-service connections. It enables client applications to automatically reconnect to the database if the connection fails, and optionally resume a SELECT statement that was in progress. This reconnect happens automatically from within the Oracle Call Interface (OCI) library.

      Why is Clusterware installed as root (and not oracle)?

      What is the difference between a CR block and a CUR (current) block?

      What is fencing?
      Ans:
      I/O fencing prevents updates by failed instances by detecting failure and preventing split brain in the cluster. When a cluster node fails, the failed node needs to be fenced off from all the shared disk devices or diskgroups. This methodology is called I/O fencing, sometimes called disk fencing or failure fencing.

      Grid Naming Service (GNS): Another new service introduced in Oracle RAC 11gR2. With GNS, Oracle Clusterware (CRS) can manage DHCP and DNS services for dynamic node registration and configuration.

      Major RAC wait events?
      In a RAC environment the buffer cache is global across all instances in the cluster, and hence the processing differs. The most common wait events related to this are gc cr request and gc buffer busy.

      gc cr request: the time it takes to retrieve the data from the remote cache.

      Reason: RAC traffic using a slow connection, or inefficient queries (poorly tuned queries increase the amount of data blocks requested by an Oracle session; the more blocks requested, typically the more often a block will need to be read from a remote instance via the interconnect).

      gc buffer busy: the time the remote instance spends locally accessing the requested data block.

      To verify that RAC instances are running?

      select * from V$ACTIVE_INSTANCES;
      select inst_id,username,failover_method,failover_type,failed_over from gv$session where username='&username';

      ACMS?
      ACMS stands for Atomic Controlfile to Memory Service. In an Oracle RAC environment, ACMS is an agent that ensures distributed SGA memory updates: SGA updates are globally committed on success or globally aborted in the event of a failure.

      How does OCSSD start first if the voting disk & OCR reside in ASM diskgroups?
      You might wonder how CSSD, which is required to start the clustered ASM instance, can be started if the voting disks are stored in ASM.

      Without access to the voting disks there is no CSS, hence the node cannot join the cluster.
      But without being part of the cluster, CSSD cannot start the ASM instance.
      To solve this problem the ASM disk headers have new metadata in 11.2:
      you can use kfed to read the header of an ASM disk containing a voting disk.
      The kfdhdb.vfstart and kfdhdb.vfend fields tell CSS where to find the voting file. This does not require the ASM instance to be up.
      Once the voting disks are located, CSS can access them and join the cluster.
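      A sketch of that header read (the disk path is a placeholder; non-zero vfstart/vfend values indicate the disk holds a voting file):

      $ kfed read /dev/oracleasm/disks/DISK1 | grep -E 'kfdhdb.vfstart|kfdhdb.vfend'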

      In Oracle RAC clusters, we see three types of IP addresses:

      Public IP: The public IP address is for the server. This is the same as any server IP address, a unique address which exists in /etc/hosts.
      Private IP: Oracle RAC requires "private IP" addresses to manage the CRS, the clusterware heartbeat process and the cache fusion layer.
      Virtual IP: Oracle uses a Virtual IP (VIP) for database access. The VIP must be on the same subnet as the public IP address. The VIP is used for RAC failover (TAF).

      A Virtual IP is simply another IP which runs on the same interface (e.g. eth0) as your public IP.
      The VIP is available on all nodes, alongside each node's individual IP. Your listener is aware of both the public IP and the VIP and listens on both.
      In case of a failover, the VIP of node 1 is shifted to node 2.

      roothas.pl vs rootcrs.pl

      Both reside under $GRID_HOME/crs/install.
      roothas.pl is used when you run the grid infrastructure in standalone mode (single-node cluster / Oracle Restart).
      rootcrs.pl is used when you run the grid infrastructure in normal mode (a normal cluster comprising one or more nodes).

      Refer: https://www.dbaplus.ca/2020/05/12201-initohasd-does-not-start.html

      What is node eviction?

      Word Meaning: to force(someone) to leave the place.

      The process of removing a failed node (failed due to various reasons) from the cluster is known as eviction. Prior to 11gR2, Oracle tries to prevent a split-brain situation by quickly rebooting the failed node. From 11gR2, Clusterware will first attempt to clean up the failed resources. If the clusterware is able to clean up the failed resources, OHASD will try to restart the CRS stack. Once this task is done, all the cluster resources on that node will be started automatically. This is called rebootless fencing (or eviction). If clusterware cannot stop or clean up the failed resources, it will reboot the node.

      Causes of node eviction :

      -Missing network heartbeat
      -Missing disk heartbeat
      -CPU starvation issues
      -Hanging cluster processes
      -May have more...
      How to Proceed from Failed 11gR2 Grid Infrastructure (CRS) Installation [ID 942166.1]
      http://oracle-help.com/oracle-rac/node-eviction-oracle-rac/
      https://db.geeksinsight.com/2012/12/27/oracle-rac-node-evictions-11gr2-node-eviction-means-restart-of-cluster-stack-not-reboot-of-node/

      After an Oracle RAC node crashes, rerouting transactions to the surviving node:

      https://community.oracle.com/tech/developers/discussion/1079972/shutdown-one-node-in-rac

      Transparent Application Fail-over in Oracle RAC
      http://oracle-help.com/oracle-rac/transparent-application-fail-over-in-oracle-rac/

      What is the GPnP profile?

      The GPnP profile is a small XML file located in GRID_HOME/gpnp/<hostname>/profiles/peer under the name profile.xml. It is used to establish the correct global personality of a node. Each node maintains a local copy of the GPnP profile, which is maintained by the GPnP daemon (GPnPD). The GPnP profile stores information required for the startup of Oracle Clusterware, like the SPFILE location, ASM DiskString, etc. It contains various attributes defining the node personality:
      Cluster name
      Network classifications (Public/Private)
      Storage to be used for ASM: SPFILE location, ASM DiskString, etc.
      Digital signature information: The profile is security sensitive. It might identify the storage to be used as the root partition of a machine. Hence, it contains digital signature information of the provisioning authority.

      What is the GPnP profile?

      The Grid Plug and Play (GPnP) file is a small XML file present on the local OS file system; each node has its own GPnP file.
      The GPnP file is managed by the GPnP daemon.
      It stores information like the ASM diskstring and ASM spfile, which are required to start the cluster:

      – Storage to be used for CSS
      – Storage to be used for ASM: SPFILE location, ASM DiskString
      – Public/private network details.
      When the clusterware is started, it needs the voting disk (which is inside ASM). So first it checks the GPnP profile to get the voting disk location (asm_diskstring is defined inside the GPnP profile). As ASM is not up at this point, the voting file is read from the disk header using the kfed utility. (We can run kfed even when the ASM instance is down.)

      https://rafik-dba.blogspot.com/2019/02/oracle-rac-startup-sequence.html

      ==========
      What are the software stacks in Oracle Clusterware?

      From 11g onward, there are two stacks in Oracle Clusterware (CRS):

      The lower stack is the High Availability Services stack (managed by the ohasd daemon).
      The upper stack is the CRSD stack (managed by the CRSd daemon).
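      The two stacks can be inspected separately (standard crsctl usage):

      crsctl stat res -t -init    -- lower-stack resources managed by ohasd (ora.cssd, ora.crsd, ora.asm, ...)
      crsctl stat res -t          -- cluster resources managed by CRSd (databases, listeners, VIPs, ...)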

      What are the role of CRSD,CSSD,CTSSD, EVMD, GPNPD?

      CRSD -> Cluster Ready Services daemon - It manages the cluster resources based on the OCR information. This includes the start, stop and failover of resources. It monitors database instances, ASM instances, listeners, services, etc., and automatically restarts them when failures occur.

      CSSD -> Cluster Synchronization Service - It manages the cluster configuration, like which nodes are part of the cluster, etc. When a node is added or deleted, it informs the other nodes. It is also responsible for node eviction when the situation calls for it.

      CSSD has 3 processes:

      the CSS daemon (ocssd),

      the CSS Agent (cssdagent), which monitors the cluster and provides input/output fencing,

      the CSS Monitor (cssdmonitor), which monitors internode cluster health.

      CTSSD -> Provides time management for the cluster. If NTP is running on the server, CTSS runs in observer mode.

      EVMD -> Event Manager, a background process that publishes Oracle Clusterware events and manages message flow between the nodes, logging relevant information to the log file.
       
      oclskd -> Cluster Kill Daemon – > Is used by CSS to reboot a node based on requests from other nodes in the cluster
       
      Grid IPC daemon (gipcd): Is a helper daemon for the communications infrastructure
       
      Grid Plug and Play (GPNPD): GPNPD provides access to the Grid Plug and Play profile and coordinates updates to the profile among the nodes of the cluster, to ensure that all nodes have the most recent profile.
       
      Multicast Domain Name Service (mDNS): Grid Plug and Play uses the mDNS process to locate profiles in the cluster, and GNS uses it to perform name resolution.
      Oracle Grid Naming Service (GNS): Handles requests sent by external DNS servers, performing name resolution for names defined by the cluster.

      The ASM spfile is stored inside an ASM diskgroup, so how does clusterware start the ASM instance (given that the ASM instance needs the spfile at startup)?

      Here is the sequence of cluster startup:

      ohasd is started by init.ohasd.

      ohasd accesses the OLR file (stored on the local file system) to initialize the ohasd process.

      ohasd starts gpnpd and cssd.

      The cssd process reads the GPnP profile to get information like the asm_diskstring and asm spfile location.

      cssd scans all the ASM disk headers, finds the voting disk location, reads it using kfed, and joins the cluster.

      To read the spfile, it is not necessary to mount the diskgroup. All information necessary for this is stored in the ASM disk header. OHASD reads the header of the ASM disk containing the spfile (this spfile location is retrieved from the GPnP profile), and the contents of the spfile are read using kfed. Using this ASM spfile, the ASM instance is started.

      Now that the ASM instance is up, the OCR can be accessed, as it is inside an ASM diskgroup. So OHASD will start CRSD.

      So below are the 5 important files it accesses:

      FILE 1 : OLR ( ORACLE LOCAL REGISTRY )   ——————————-> OHASD Process
      FILE 2 :GPNP PROFILE ( GRID PLUG AND PLAY ) ————————> GPNPD process
      FILE 3 : VOTING DISK —————————————————————-> CSSD Process
      FILE 4 : ASM SPFILE ——————————————————————> OHASD Process
      FILE 5 : OCR ( ORACLE CLUSTER REGISTRY ) ——————————> CRSD Process

      In RAC, where do we define the SCAN?

      We can define the SCAN with either of these two options:

      Using corporate DNS
      Using Oracle GNS (Grid Naming Service)
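      To see how SCAN is currently defined on a cluster (standard commands):

      srvctl config scan           -- SCAN name and its IP addresses
      srvctl config scan_listener  -- SCAN listener configuration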

      What is rebootless node fencing?

      Prior to 11.2.0.2, if failures happened with RAC components like the private interconnect or voting disk accessibility, then to avoid split brain, Oracle Clusterware performed a fast reboot of the node. But the problem with a node reboot was that any non-cluster-related processes running on the node also got aborted. Also, with a reboot, the resources need to be remastered, which is sometimes expensive.

      Also, if there is a temporary issue or blockage in the I/O, the clusterware may misjudge the situation and initiate a reboot.

      So to avoid this, from 11.2.0.2 onward this method has been improved, and it is known as rebootless node fencing:

      First the clusterware finds which node is to be evicted.
      Then I/O-generating processes are killed on the problematic node.
      Clusterware resources are stopped on the problematic node.
      The OHASD process keeps running and will continuously try to start CRS until the issue is resolved.

      But if due to some issue it is unable to stop the processes on the problematic node (i.e. rebootless fencing fails), then a fast reboot will be initiated by cssd.

      Explain the steps for node addition in Oracle RAC.

      Run gridsetup.sh from any of the existing nodes, select the add node option, and proceed with the remaining steps.
      Now extend the oracle_home to the new node using the addnode.sh script (from an existing node).
      Now run dbca from an existing node and add the new instance.

      https://dbaclass.com/article/how-to-add-a-node-in-oracle-rac-19c/

      Explain the steps for node deletion.

      Delete the instance using dbca.
      Deinstall ORACLE_HOME using $ORACLE_HOME/deinstall.
      Run gridsetup.sh and select the delete node option.

      https://dbaclass.com/article/how-to-delete-a-node-from-oracle-rac-19c/

      The ASM spfile location is missing inside the GPnP profile. How will the ASM instance start up?

      For this, we need to understand the search order for the ASM spfile:

      First it will check for the ASM spfile location inside the GPnP profile.
      If no entry is found inside the GPnP profile, it will check the default path $ORACLE_HOME/dbs/spfile+ASM.ora, or a pfile.
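      The profile itself can be inspected with the standard gpnptool utility to confirm whether the SPFile attribute is present (a quick check):

      gpnptool get    -- dumps the profile XML, including the ASM SPFile and DiscoveryString attributes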

      How do you troubleshoot if a cluster node gets rebooted?

      http://www.dbaref.com/troubleshooting-rac-issues/howtotroubleshootgridinfrastructurestartupissues

      In a 12c two-node RAC, what will happen if I unplug the network cable of the private interconnect?

      Rebootless node fencing will happen: on the node which is going to be evicted, all cluster services will be brought down, and the services will be moved to the surviving node. CRS will attempt restarts continuously until the private interconnect issue is fixed. Please note: the node will not be rebooted, only the cluster services will go down.

      However, prior to 11.2, in this situation a node reboot would occur.

      Suppose someone has changed the permissions of files inside the grid home. How will you fix it?

      You can run the rootcrs.sh -init command to revert the permissions.

      # cd <GRID_HOME>/crs/install/
      # ./rootcrs.sh -init

      Alternatively you can check the below files under $GRID_HOME>/crs/utl/<hostname>/

      – crsconfig_dirs which has all directories listed in <GRID_HOME> and their permissions

      – crsconfig_fileperms which has list of files and their permissions and locations in <GRID_HOME>.

      CSSD is not coming up? What will you check, and where?

      1. Voting disk is not accessible.
      2. Issue with the private interconnect.
      3. The auto_start parameter is set to NEVER in the ora.cssd resource. (To fix the issue, change it to ALWAYS using crsctl modify resource.)

      Oracle RAC Interview Questions>>>https://dbaclass.com/article/oracle-rac-interview-questions/

      Related doc:

      Frequently Asked Questions (RAC FAQ) (Doc ID 220970.1)
      https://oracle-patches.com/en/databases/oracle/oracle-rac-components
      http://oracle-help.com/oracle-rac/rac-11gr2-clusterware-startup-sequence/

      Tuesday, June 27, 2017

      fnd_conc_clone.setup_clean and cmclean.sql

      fnd_conc_clone.setup_clean:
      fnd_conc_clone  ===> is package
      setup_clean ====> it is procedure name
      When we run fnd_conc_clone.setup_clean, it deletes the information from the tables below.
      fnd_conc_clone.setup_clean is used to clean up the fnd_nodes table in the target, clearing source node information as part of cloning.
      Delete from fnd_concurrent_queue_size
      Delete from fnd_concurrent_queues_tl
      Delete from fnd_concurrent_queues
      Delete from fnd_nodes

      syntax to execute:
      Connect as apps user:
      SQL> exec fnd_conc_clone.setup_clean;
      PL/SQL procedure successfully completed.
      SQL> commit;
      Commit complete.
      Afterwards, run autoconfig on the DB tier and the middle tier; this will register the nodes in the fnd_nodes table.

      cmclean.sql
      Scenario when to run:
      To clean up running and pending requests we use cmclean.sql. If we stop the concurrent managers using the abort option, concurrent requests will be left in a running state, and when we next start the concurrent managers the processes will not start properly.

      Clean out the concurrent manager tables by re-setting values to ZERO.

      If cmclean.sql is run when the concurrent managers are up and running, the script will not be able to clean and remove the rows from the concurrent manager process and request tables, as the ICM and other managers will be holding locks on these tables. So it is better to shut down the concurrent managers cleanly and then run cmclean.sql.

      CMCLEAN will update below tables:
      1) FND_CONCURRENT_QUEUES
      2) FND_CONCURRENT_PROCESSES
      3) FND_CONCURRENT_REQUESTS
      4) FND_CONFLICTS_DOMAIN
      5) FND_CONCURRENT_CONFLICT_SETS
      Doc Id: [ID 134007.1]

      How to rebuild context file

      Database
      export ORACLE_HOME=/u01//product/12.1.0.2/dbhome
      export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/ctx/lib
      export ORACLE_SID=TEST
      export PATH=$PATH:$ORACLE_HOME/bin;
      export TNS_ADMIN=$ORACLE_HOME/network/admin/TEST_<hostname>


      EXEC FND_CONC_CLONE.SETUP_CLEAN;
      perl adbldxml.pl appsuser=APPS appspasswd=password

      =====
      Application:

      11i:
      cd $AD_TOP/bin
      perl adbldxml.pl tier=apps appsuser=apps appspass=apps template=/u01/apps/prodappl/ad/11.5.0/admin/template/adxmlctx.tmp log=/home/applmgr/adbldxml.log out=/u01/apps/prodappl/admin/PROD_apps.xml servername=apps.oracle.com jtop=/u01/apps/prodcomn/util

      R12:
       perl $COMMON_TOP/clone/bin/adclonectx.pl retrieve

      Tuesday, June 20, 2017

      Crontab Format

      An example of crontab format with commented fields is as follows:
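      A minimal commented sketch of the standard five-field layout (the schedule and command are placeholders):

      # minute (0-59)  hour (0-23)  day-of-month (1-31)  month (1-12)  day-of-week (0-6, Sunday=0)
      # |   |  |  |  |
      # *   *  *  *  *  command to be executed
        30  2  *  *  0  /home/oracle/scripts/weekly_backup.sh > /tmp/weekly_backup.log 2>&1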


      Wednesday, June 14, 2017

      Data Dictionary views and V$ views (dynamic views)

      Data Dictionary views:
      Data is not lost even after the instance is shut down.
      Accessible only when the instance is OPEN.
      Data dictionary view names are plural (e.g. DBA_OBJECTS).

      V$ views (dynamic views):
      Data is lost when the instance is shut down.
      (Some are) accessible even when the instance is in NOMOUNT (STARTED) or MOUNT stage.
      V$ view names are singular (e.g. V$INSTANCE).
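      A quick way to see the difference (assumes SYSDBA access on a test instance):

      SQL> startup mount
      SQL> select status from v$instance;      -- works: dynamic view, instance only MOUNTed
      SQL> select count(*) from dba_objects;   -- fails with ORA-01219 until the database is OPEN
      SQL> alter database open;
      SQL> select count(*) from dba_objects;   -- now works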

      SYS, SYSDBA, SYSOPER and SYSTEM

      sys and system are "real schemas"; they are the default users.
      Both are automatically created at database creation and granted the DBA role.
      SYS is the super user and has full control of the database, with the two default administrative roles sysdba and sysoper.
      SYS is like root for us. It holds the data dictionary, and it is special (it physically works differently from other accounts: no flashback query for it, no read-only transactions, no triggers, etc.).
      SYSTEM is our DBA account; it is just a normal user.
      The data dictionary can be changed by sys but not by system.
      sysdba and sysoper are ROLES - they are not users, not schemas.
      The SYSDBA role is like "root" on Unix or "Administrator" on Windows. It sees all, can do all. Internally, if you connect as sysdba, your schema name will appear to be SYS.
      sysoper is another role; if you connect as sysoper, you'll be in the schema "public" and will only be able to do things granted to public AND start/stop the database.
      sysoper is something you should use for startup and shutdown. You'll use sysoper much more often than sysdba.
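      A small demonstration of the schema mapping (assumes OS authentication is configured):

      $ sqlplus / as sysdba
      SQL> show user
      USER is "SYS"

      $ sqlplus / as sysoper
      SQL> show user
      USER is "PUBLIC"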

      *Role means authorization to do something. It is a bunch of privileges.

      Difference between Role & Privilege:
      Privileges control the ability to run SQL statements. A role is a group of privileges. Granting a role to a user gives them the privileges contained in the role.

      A privilege is a right to execute an SQL statement or to access another user's object. In Oracle, there are two types of privileges: system privileges and object privileges.
      A privilege can be assigned to a user or to a role.
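      A minimal illustration of the two concepts (all names are placeholders):

      create role app_read;                    -- a role: a named bundle of privileges
      grant create session to app_read;        -- system privilege granted to the role
      grant select on scott.emp to app_read;   -- object privilege granted to the role
      grant app_read to jsmith;                -- the user receives everything in the role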

      SGA_MAX_SIZE & SGA_TARGET / MEMORY_TARGET & MEMORY_MAX_TARGET

      SGA_MAX_SIZE sets the overall amount of memory the SGA can consume but is not dynamic.
      The SGA_MAX_SIZE parameter is the max allowable size to resize the SGA Memory area parameters.

      If SGA_TARGET is set to some value, then Automatic Shared Memory Management (ASMM) is enabled. The SGA_TARGET value can be adjusted up to, but not beyond, the SGA_MAX_SIZE parameter value. This parameter is dynamic and can be increased up to the value of SGA_MAX_SIZE.
      MEMORY_TARGET & MEMORY_MAX_TARGET

      SGA and PGA can be managed together rather than separately.

      If SGA_TARGET, SGA_MAX_SIZE and PGA_AGGREGATE_TARGET are set to 0 and MEMORY_TARGET (and optionally MEMORY_MAX_TARGET) is set to a non-zero value, Oracle will manage both the SGA components and the PGA together within the limit specified.

      If MEMORY_TARGET is set to 1024MB, Oracle will manage the SGA and PGA components within that limit.

      If MEMORY_TARGET is set to a non-zero value:

      If SGA_TARGET, SGA_MAX_SIZE and PGA_AGGREGATE_TARGET are set to 0, 60% of the memory specified in MEMORY_TARGET is allocated to the SGA and the remaining 40% is kept for the PGA.
      If SGA_TARGET and PGA_AGGREGATE_TARGET are set to non-zero values, these values are treated as minimums (but the sum of SGA_TARGET and PGA_AGGREGATE_TARGET must be less than or equal to MEMORY_TARGET).
      If SGA_TARGET is set to a non-zero value and PGA_AGGREGATE_TARGET is not set, both are still auto-tuned and PGA_AGGREGATE_TARGET is initialized to (MEMORY_TARGET - SGA_TARGET).
      If PGA_AGGREGATE_TARGET is set and SGA_TARGET is not set, both are still auto-tuned and SGA_TARGET is initialized to (MEMORY_TARGET - PGA_AGGREGATE_TARGET).
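      A sketch of switching an instance to AMM (values are examples; MEMORY_MAX_TARGET is static, so a restart is required):

      alter system set memory_max_target=4G scope=spfile;
      alter system set memory_target=3G scope=spfile;
      alter system set sga_target=0 scope=spfile;
      alter system set pga_aggregate_target=0 scope=spfile;
      shutdown immediate
      startup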

      Tuesday, June 13, 2017

      begin backup mode

      DBWn checkpoints the tablespace (writes out all dirty blocks as of a given SCN)
      CKPT stops updating the Checkpoint SCN field in the datafile headers and begins updating the Hot Backup Checkpoint SCN field instead
      LGWR begins logging full images of changed blocks the first time a block is changed after being written by DBWn
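      The mode is toggled per tablespace (or database-wide from 10g onward) around the OS-level copy of the datafiles:

      alter tablespace users begin backup;
      -- copy the tablespace's datafiles at the OS level (e.g. with cp)
      alter tablespace users end backup;

      alter database begin backup;   -- all tablespaces at once (10g and later)
      alter database end backup;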

      Adpreclone and Adcfgclone

      What happens when you run adpreclone and adcfgclone?
      The adpreclone.pl script prepares the source system to be cloned by collecting information about the source system. It creates a cloning stage area and generates templates and drivers from existing files that contain source-specific hard-coded values.
      When you run "adpreclone.pl dbTier" on the DB side:
      The following directories will be created in ORACLE_HOME/appsutil/clone: Jlib, db, data; where "Jlib" relates to libraries, "db" contains the techstack information, and "data" contains the information related to datafiles required for cloning.
      Creates driver files at ORACLE_HOME/appsutil/driver/instconf.drv
      Converts the inventory from binary to xml; the xml file is located at $ORACLE_HOME/appsutil/clone/context/db/Sid_context.xml
      Prepares the database for cloning: this includes creating the database control file script and datafile location information file at
      $ORACLE_HOME/appsutil/template/adcrdbclone.sql and dbfinfo.lst
      Generates the database creation driver file at ORACLE_HOME/appsutil/clone/data/driverdata.drv
      Copies the JDBC libraries at ORACLE_HOME/appsutil/clone/jlib/classes12.jar and appsutil

      When you run "adpreclone.pl appsTier" on the apps side:
      This will create the stage directory at $COMMON_TOP/clone. This also runs in two steps.
      Techstack: Creates template files for Oracle_iAS_Home/appsutil/template and Oracle_806_Home/appsutil/template
      Creates techstack driver files for IAS_ORACLE_HOME/appsutil/driver/instconf.drv and 806_ORACLE_HOME/appsutil/driver/instconf.drv
      APPL_TOP preparation: It will create the application top driver file at $COMMON_TOP/clone/appl/driver/appl.drv and copy the JDBC libraries to $COMMON_TOP/clone/jlib/classes111.zip

      What does perl adcfgclone.pl dbTechStack do?
      perl adcfgclone.pl dbTechStack does the following:
      1)Create context file
      2)Register ORACLE_HOME
      3)Relink ORACLE_HOME
      4)Configure ORACLE_HOME
      5)Start SQL*NET listener

      What does perl adcfgclone.pl dbTier do?
      1)Create context file
      2)Register ORACLE_HOME
      3)Relink ORACLE_HOME
      4)Configure ORACLE_HOME
      5)Recreate controlfile
      6)Configure database
      7)Start SQL*NET listener
      ==
      cd $ORACLE_HOME/appsutil/clone/bin
      perl adcfgclone.pl dbTier pwd=apps
      This uses the templates and driver files that were created while running adpreclone.pl on the source system and copied to the target system.
      The following scripts are run by adcfgclone.pl dbTier for configuring the techstack:
      adchkutl.sh — checks the system for ld, ar, cc, and make versions.
      adclonectx.pl — clones the context file; this creates a new context file as per the details of this instance.
      runInstallConfigDriver — located in $Oracle_Home/appsutil/driver/instconf.drv
      adlnkoh.sh (under $Oracle_Home/appsutil/install) — relinks the ORACLE_HOME.
      For data on the database side, the following are run:
      Driver file $Oracle_Home/appsutil/clone/context/data/driver/data.drv
      Create database: adcrdb.zip
      Autoconfig is run
      Control file creation: adcrdbclone.sql

What does perl adcfgclone.pl appsTier do?
perl adcfgclone.pl appsTier does the following:
1) Create context file
2) Register ORACLE_HOME
3) Relink ORACLE_HOME
4) Configure ORACLE_HOME
5) Create INST_TOP
6) Configure APPL_TOP
7) Start apps processes
      ==
      On Application Side

      cd $COMMON_TOP/clone/bin/
      perl adcfgclone.pl appsTier pwd=apps
The following scripts are run by adcfgclone.pl:
adclonectx.pl — creates the context file for the target
Runs the driver files $ORACLE_HOME/appsutil/driver/instconf.drv and $IAS_ORACLE_HOME/appsutil/driver/instconf.drv
Relinks the Oracle homes via $ORACLE_HOME/bin/adlnk806.sh and $IAS_ORACLE_HOME/bin/adlnkiAS.sh
At the end it runs the driver file $COMMON_TOP/clone/appl/driver/appl.drv and then runs AutoConfig.

When we run adcfgclone.pl, which script does it call?
It calls adclone.pl, which is located at $AD_TOP/bin.

When we run perl adpreclone.pl dbTier, why does it require the apps password?
It requires a database connection to validate the apps schema.

When do you run adpreclone on Production?
Whenever changes are made to the techstack or database, or any patches are applied.

How do we find whether adpreclone has been run on the source or not?
Check whether a clone directory exists under $RDBMS_ORACLE_HOME/appsutil for the oracle user and under $COMMON_TOP/clone for the applmgr user.
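A quick check along those lines (a minimal sketch; here $ORACLE_HOME stands for the RDBMS Oracle home):

$ ls -d $ORACLE_HOME/appsutil/clone    # as the oracle user
$ ls -d $COMMON_TOP/clone              # as the applmgr user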

When we run perl adpreclone.pl appsTier, why does it not prompt for the apps password?
It doesn't require a database connection.

adcfgclone on the database node has three modes:
perl adcfgclone.pl dbTier
Configures the ORACLE_HOME on the target database tier node and recreates the controlfiles.
This is especially useful in the case of standby databases and/or hot backups. It takes care of all the steps.

perl adcfgclone.pl dbTechStack
Configures the ORACLE_HOME on the target database tier node only and relinks the Oracle home.

perl adcfgclone.pl dbconfig
Used to configure the database with a context file. The database should be in open mode.

adcfgclone.pl appsTier dualfs
DUALFS is a new feature introduced in AD-TXK Delta 7.
It creates both filesystems, fs1 and fs2, during the cloning process.
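A hedged usage sketch, following the same invocation pattern as the standard appsTier clone (verify the exact syntax against your AD-TXK level):

$ cd $COMMON_TOP/clone/bin
$ perl adcfgclone.pl appsTier dualfs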

      Cloning Procedure:

      Instance Details:
      Source Instance : PRD
      Target Instance : TST

      Pre-Tasks:
Do blackout and bring down the target system's services.
Back up the parameter file, CONTEXT_FILEs and all environment files of both application and database.
Remove the application files (apps_st, tech_st and INST_TOP) of the target system.
Drop the database:
screen -S ownuser
sudo su - oracle
sqlplus / as sysdba
SQL> startup mount exclusive restrict;
SQL> show parameter cluster_database;
SQL> select name, open_mode from v$database;
SQL> drop database;
Verify space on the database node and applications node.

      Prepare the source system:
On source, run adpreclone.pl (if not executed recently)
      $ cd  $ORACLE_HOME/appsutil/scripts/[CONTEXT_NAME]
      $ perl adpreclone.pl dbTier

      $ cd $INST_TOP/admin/scripts
      $ perl adpreclone.pl appsTier

      Copying files:
      On DB Node
Copy the latest DB backups from source to target (including archivelog backup and controlfile backup)
      On Apps Node
      Copy apps_st and tech_st from source application node to target application node.

Steps to Clone Database (there can be multiple ways):
1. With duplicate from backup or standby database:

[oracle@hostname scripts]$ cat duplicate.sh
#!/bin/ksh
today=`date +"%Y%m%d_%H:%M:%S"`
. /u01/home/oracle/TST.env
rman=$ORACLE_HOME/bin/rman
$rman cmdfile=/u01/script/duplicate.fic log=/u01/log/rman_9june17.log
      [oracle@hostname scripts]$ cat duplicate.fic
      @/u01/script/duplicate.cnx
      run {
allocate auxiliary channel disk1 type DISK;
      SET NEWNAME FOR BLOCK CHANGE TRACKING FILE TO '+MAN';
      #set until time "TO_DATE('2016-06-19 00:25:00','yyyy-mm-dd hh24:mi:ss')";
      duplicate database to TST backup location '/backup/PRD/rman' nofilenamecheck
      LOGFILE
        GROUP 1 ('+TST_REDO') SIZE 500M,
        GROUP 2 ('+TST_REDO') SIZE 500M,
        GROUP 3 ('+TST_REDO') SIZE 500M
        ;
        }
      [oracle@hostname scripts]$ cat /u01/script/duplicate.cnx
      set echo off
      connect auxiliary /
      set echo on
      [oracle@hostname scripts]$

2. Available hot backup:
Change the db_name parameter in the pfile that is backed up (eg: TST to PRD).
Start the target db using the modified pfile:
SQL> startup nomount pfile='/backup/clone/initTST.ora'
Restore the controlfile from the backup of PRD:
RMAN> restore controlfile from '/backup/clone/PRD/PRD.ctl';

Mount the database:
SQL> alter database mount;
Once the db is mounted, catalog the backups copied from the source:
RMAN> catalog start with '/backup/clone/PRD/';

Once the backups are cataloged we can restore the database using PRD's backup. But the datafiles still point to PRD's
location, so the restore would try to write them to PRD's paths, which may not exist on the target.

Hence we have to rename the datafiles to point to the new (TST) location before restoring, using SET NEWNAME in RMAN.
eg: set newname for datafile 1 to '/u01/oracle/TST/system.dbf';

Note: The script below can be used to generate the SET NEWNAME commands. Change the path of the target datafile location and run it on the source.
select 'set newname for datafile ' || file_id || ' to ''/u01/oracle/TST/' ||
       substr(file_name, instr(file_name,'/',-1)+1) || ''';'
from dba_data_files;

      Restore and recover the database
      Eg:
RMAN> run {
set until time "to_date('26/08/2017 23:20:00','dd/mm/yyyy hh24:mi:ss')";
set newname for datafile 1 to '/u01/oracle/TST/system.dbf';
set newname for datafile 44 to '/u01/oracle/TST/apps_ts_tx_data_01.dbf';
set newname for datafile 76 to '/u01/oracle/TST/apps_ts_tx_idx01.dbf';
set newname for datafile 35 to '/u01/oracle/TST/sysaux_01.dbf';
set newname for datafile 37 to '/u01/oracle/TST/undotbs1_01.dbf';
allocate channel ch1 type disk;
allocate channel ch2 type disk;
restore database;
switch datafile all;
recover database;
release channel ch1;
release channel ch2;
}

After the database is restored and recovered, open the database using resetlogs:
SQL> alter database open resetlogs;
Change the database name to TST. nid requires a mounted database, so shut down and start in mount mode first:
SQL> shutdown immediate
SQL> startup mount
$ nid TARGET=SYS/welcome123 DBNAME=TST
Once the database is renamed, the nid command will bring the database down. Start it using the old pfile in $ORACLE_HOME/dbs (which has db_name as TST):
SQL> startup mount

SQL> alter database open resetlogs;

3. Put the source in begin backup mode and continue:
Put the database in begin backup mode so that the backup can be performed without disturbing other operations in the database.
SQL> alter database begin backup;
You can check the backup mode status with the command below:
SQL> select * from v$backup;
status: ACTIVE
Copy all the database files from the database tier to the target location.
Next, release the database from backup mode:
SQL> alter database end backup;
status: NOT ACTIVE
Next, take a backup of the controlfile to the backup location:
SQL> alter database backup controlfile to trace as '/u01/oraR12/cont.sql';
Copy the generated archivelog files to the target location; they are required for recovering the database at the target. Copy the controlfile trace as well.
$ cd /u01/oraR12/db/tech_st/11.1.0/appsutil/clone/bin
$ perl adcfgclone.pl dbTechStack
Recreate the database control files using the controlfile trace.
Once the control file is created, the database will be in mount stage. Execute the recover command using the backup controlfile:
SQL> recover database using backup controlfile until cancel;
Then open the database.

4. Cold Backup

      Post Steps in Database:
      Recreate ALL the temp tablespaces.
      SQL> EXEC FND_CONC_CLONE.SETUP_CLEAN;
Run AutoConfig on the DB tier after ensuring that the DB and listener are up and running.

      Steps to Clone Application:

Compare the size (du -sh) output with production for the same content.
cd /u01/oracle/TST/apps/apps_st/comn/clone/bin
perl adcfgclone.pl appsTier <Path of the context_file that was saved in the Pre-Tasks>
Stop the concurrent managers and workflow.
Stop the applications.
Change the apps password:
FNDCPASS apps/xxx 0 Y system/xxx SYSTEM APPLSYS xxx
Change the sysadmin password:
FNDCPASS apps/xxx 0 Y system/xxx USER SYSADMIN xxx
Point all the fndcpesr soft links in $XX_TOP/bin to $FND_TOP/bin/fndcpesr (see the sketch after this list).
Run cmclean.sql.
Run adautocfg.sh.
Start all the services.
Log in to the application using the login page and launch the forms; check the CMs.
Change the Site Name to the name of the current environment with the refresh time information.
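A minimal sketch of the fndcpesr symlink step referenced above, assuming a hypothetical custom top XXCUST_TOP and wrapper name XXCUSTPRG:

$ cd $XXCUST_TOP/bin
$ ln -sf $FND_TOP/bin/fndcpesr XXCUSTPRG   # XXCUSTPRG is a hypothetical custom program wrapper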

      Monday, June 12, 2017

      Physical Standby Database

      Why we need Data Guard:

      High Availability
      Disaster Recovery
      Data Protection
      Workload Offloading & Testing

      How Data Guard Works:

In simple terms, Data Guard tracks the changes on the primary database and replicates them to the standby databases through redo logs/archive logs.

      Evolution of Oracle Dataguard:

Oracle Data Guard was initially called "standby database", introduced in version 7.3.

In that version, the redo files had to be copied manually and the database had to be recovered manually.

In 8i, Oracle introduced automated archivelog shipping and managed recovery mode, but there was no option for a log apply delay.

In 9i, Oracle renamed it Oracle Data Guard and introduced the Data Guard Broker with switchover and delay options. But there was no failover option, and after a switchover the other database had to be rebuilt manually.

In 10g, Oracle introduced real-time apply with the Fast-Start Failover option. Oracle also introduced the database Flashback option along with guaranteed restore points.

In 11g, Oracle introduced the Active Standby and Snapshot Standby options. Also, Block Change Tracking was enabled at the standby, through which RMAN incremental backups can be offloaded.

In 12c, Oracle introduced Multi-Instance Redo Apply and parallel multi-instance recovery, which let all nodes of a RAC standby participate in the recovery operation if the standby database is RAC configured. Support for the Diagnostics and Tuning packs was enabled, so AWR reports can be created on the standby and analyzed with SQL Performance Analyzer.

In 18c, Oracle introduced recovery of the standby database using a service. Through this we can refresh the standby database, including the controlfile, datafiles and newly added datafiles, with a single command. We can also create global temporary tables on the standby for testing purposes.

In 19c, Oracle introduced two exciting features.

First is Active DG DML Redirect, through which even DML statements can be executed directly on the standby database; they are applied to the primary database as well, which reduces the workload of the primary database. We also have automatic standby recovery through the Flashback feature, by which we don't need to flashback/rebuild the standby database after a primary flashback.
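A sketch of how the 19c DML redirect is switched on, to the best of my understanding of the 19c syntax (verify against the documentation for your release):

SQL> alter system set adg_redirect_dml=true scope=both;   -- enable instance-wide
SQL> alter session enable adg_redirect_dml;               -- or enable per session on the standby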

      Types of Standby databases:

Standby databases are classified as physical and logical standby databases.

In a physical standby, the redo data itself is applied to keep the standby in sync.

In a logical standby, the corresponding SQL statements are applied to keep the standby database in sync with the primary.

Usually the physical standby is in mount mode, but the Active Standby option lets it run open in read-only mode while the changes from the primary continue to be applied. This Active Data Guard feature is useful for offloading reporting queries, backup jobs, etc.

The Snapshot Standby feature opens the database in read-write mode, with a guaranteed restore point created along with Flashback. This lets the standby database be used for code testing and load testing activities. Once the activity is completed, the database is put back into physical standby mode and all the changes are flashed back.

What are the services required on the primary and standby database?

The redo buffer on the primary keeps track of all changes on the database, which are then written to the online redo logs by the LGWR process.

The online redo log content is written to archived redo log files by the archiver (ARCn) process.

Initially, the archivelogs were manually copied to the standby database and applied using the recover standby database option.

Later, Oracle introduced managed standby recovery, which brought in RFS (Remote File Server) and MRP (Managed Recovery Process). After this, the archivelogs are shipped automatically using the remote archival method and applied on a standby database that is in managed standby mode.

But in case of a primary database crash, the committed data in the online redo logs couldn't be recovered, resulting in data loss. To avoid this scenario, Oracle introduced standby redo logs.

      The services required on the primary database are:

• Log Writer Process (LGWR) - collects redo information and updates the online redo logs. It can also create local archived redo logs and transmit online redo to standby databases.

• Archiver Process (ARCn) - one or more archiver processes make copies of online redo logs, either locally or remotely, for standby databases.

• Fetch Archive Log (FAL) Server - services requests for archived redo logs from FAL clients running on multiple standby databases. Multiple FAL servers can run on a primary database, one for each FAL request.

      The services required on the standby database are:

      • Fetch Archive Log (FAL) Client - Pulls archived redo log files from the primary site. Initiates transfer of archived redo logs when it detects a gap sequence.

      • Remote File Server (RFS) - Receives archived and/or standby redo logs from the primary database.

      • Archiver (ARCn) Processes - Archives the standby redo logs applied by the managed recovery process (MRP).

      • Managed Recovery Process (MRP) - Applies archive redo log information to the standby database.

On the primary database, Oracle Data Guard uses the Log Writer Process (LGWR), Archiver Process (ARCn) or Log Writer Network Server Process (LNSn) to collect transaction redo data and ship it to the standby.
On the standby database, Oracle Data Guard uses the Remote File Server (RFS) process to receive the redo records from the primary database and the Managed Recovery Process (MRP) to apply the redo information to the physical standby database.

      Fetch archive log (FAL) process (physical standby databases only) – This process provides a client/server mechanism for resolving gaps detected in the range of archived redo logs generated at the primary database and received at the standby database. The FAL client requests the transfer of archived redo log files automatically when it detects a gap in the redo logs received by the standby database. The FAL server typically runs on the primary database and services the FAL requests coming from the FAL client. The FAL client and server are configured using the FAL_CLIENT and FAL_SERVER initialization parameters which are set on the standby location.

      DB_UNIQUE_NAME:
DB_UNIQUE_NAME specifies a globally unique name for the database. Databases with the same DB_NAME within the same DB_DOMAIN (for example, copies of a database created for reporting, or a physical standby) must have a unique DB_UNIQUE_NAME. Every database's DB_UNIQUE_NAME must be unique within the enterprise. The DB_UNIQUE_NAME values of the primary and standby databases should be used in the DG_CONFIG setting of the LOG_ARCHIVE_CONFIG parameter.

It specifies a unique name for this database, and this name does not change even if the primary and standby databases reverse roles. DB_UNIQUE_NAME is required if you specify the LOG_ARCHIVE_CONFIG parameter. Specify the DG_CONFIG attribute to identify the DB_UNIQUE_NAME for the primary database and each standby database in the Data Guard configuration. The default value of this parameter enables the primary database to send redo data to remote destinations and enables standby databases to receive redo data.

      ALTER SYSTEM SET LOG_ARCHIVE_CONFIG='DG_CONFIG=(DB11G,DB11G_STBY)';
      LOG_ARCHIVE_CONFIG='DG_CONFIG=(db_unique_name, db_unique_name, ...)'
This parameter enables the primary database to send redo data to remote destinations and enables standby databases to receive redo data. Specify the DG_CONFIG attribute to identify the DB_UNIQUE_NAME for the primary database and each standby database in the Data Guard configuration.

LOG_FILE_NAME_CONVERT:
This parameter converts the path names of the primary database's online redo log files to path names on the standby database.

DB_FILE_NAME_CONVERT:
This parameter must specify paired strings. The first string is a sequence of characters to look for in a primary database filename. If that sequence of characters is matched, it is replaced by the second string to construct the standby database filename.

STANDBY_FILE_MANAGEMENT:
Set the STANDBY_FILE_MANAGEMENT parameter to AUTO so that when datafiles are added to or dropped from the primary database, corresponding changes are made on the standby database without manual intervention.
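A hedged sketch of these three parameters on the standby side (the PRD/TST paths are assumptions carried over from the cloning examples above):

SQL> alter system set standby_file_management=AUTO scope=both;
-- in the standby spfile/pfile; the first string is the primary path, the second the standby path:
*.db_file_name_convert='/u01/oracle/PRD/','/u01/oracle/TST/'
*.log_file_name_convert='/u01/oracle/PRD/','/u01/oracle/TST/'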

Synchronous transport (SYNC) is also referred to as the "zero data loss" method because LGWR is not allowed to acknowledge that a commit has succeeded until the LNS can confirm that the redo needed to recover the transaction has been written at the standby site.

Asynchronous transport (ASYNC) differs from SYNC in that it eliminates the requirement that LGWR wait for an acknowledgment from the LNS, creating a near-zero performance impact on the primary database regardless of the distance between the primary and standby locations.

AFFIRM - the LNS process waits for the RFS to perform a direct I/O on the SRL file before returning a success message.
NOAFFIRM - the LNS process does not wait and sends a success message immediately.
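These attributes are set on the redo transport destination. For example, reusing the DB11G/DB11G_STBY names from the DG_CONFIG example above:

SQL> alter system set log_archive_dest_2='SERVICE=DB11G_STBY SYNC AFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=DB11G_STBY';
-- or, for asynchronous transport:
SQL> alter system set log_archive_dest_2='SERVICE=DB11G_STBY ASYNC NOAFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=DB11G_STBY';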

      Modes of Standby databases:

A protection mode is set only on the primary database. It defines how Oracle Data Guard will balance the configuration between performance, availability, and protection, in order to bound the amount of data loss allowed when the primary database or site fails.
We can choose between three protection modes, with different logging options. Each mode has a different impact on availability, cost, data loss, performance, and scalability.

Maximum Protection mode:
Maximum Protection mode, with the LGWR SYNC AFFIRM option, is for an environment that requires no data loss and no divergence.
This mode has the highest level of data protection and ensures no data loss. Data is synchronously transmitted to the standby database from the primary database. Transactions are not committed on the primary database unless the redo data is available on at least one standby database configured in this mode. If the last standby database configured in this mode becomes unavailable, processing stops on the primary database.

In Maximum Protection mode, a transaction on the primary commits only after its redo is written on the standby, so there is no data loss. Here the redo transport type is SYNC & AFFIRM.
Maximum Availability:
Maximum Availability mode, with the LGWR SYNC AFFIRM option, is for an environment that needs no data loss but tolerates divergence when sites are temporarily inaccessible.
It is similar to maximum protection mode, including zero data loss. Transactions are not allowed to commit until all redo data is written to the online redo logs and propagated to at least one synchronized standby database. If for any reason the primary database cannot propagate its redo stream to one standby database, the primary will NOT shut down; it operates as if it were in maximum performance mode until the issue is fixed.

In Maximum Availability mode, too, a transaction on the primary gets committed only after it is applied on the standby database. But if the standby is not available due to an outage or a network issue, transactions on the primary continue without any impact, so usually there will not be any data loss. Here also the redo transport is SYNC & AFFIRM.

Maximum Performance:
Maximum Performance mode, with the ARCH/LGWR ASYNC (AFFIRM or NOAFFIRM) option, is for an environment that tolerates minimal data loss and divergence when sites are temporarily inaccessible.
In this mode, as the primary database processes transactions, redo data is asynchronously shipped to the standby database. The commit operation of the primary database does not wait for the standby database to acknowledge receipt of the redo data before completing write operations on the primary database. If any standby destination becomes unavailable, processing continues on the primary database and there is little effect on primary database performance. This is the default mode.

In Maximum Performance mode, a primary database transaction does not wait for standby replication, which improves the performance of the primary database, but there is a real chance of data loss.
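The mode is changed and verified on the primary, for example:

SQL> alter database set standby database to maximize availability;
SQL> select protection_mode, protection_level from v$database;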

      Important related query:

      Gap between DR and Primary:
      ++++++++++++++++++++++++++
      select to_char(sysdate,'YYYY-MM-DD HH24:MI:SS') WHEN,': standby is ' || trim(to_char(1440 * (sysdate - max (next_time)),99999.99) ||' minutes behind') LAG from v$archived_log where applied ='YES';

      ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

      recover managed standby database nodelay parallel 20 disconnect from session;

      recover managed standby database parallel 20 delay 240 disconnect from session;
      alter database mount standby database;
alter database open read only;
      recover standby database until time '2015-10-08 05:00:00' using backup controlfile parallel 20;
      select distinct to_char(CHECKPOINT_TIME,'DD-MON-YYYY HH24:MI:SS') from v$datafile_header;

      set lines 500;
      select PROCESS,CLIENT_PROCESS,STATUS,THREAD#,SEQUENCE#,BLOCK#,BLOCKS from v$managed_standby where process like '%MRP%';
select THREAD#,MAX(SEQUENCE#),ROUND((SYSDATE-MAX(FIRST_TIME))*24,2) "Hours Behind" from v$archived_log where applied='YES' group by THREAD#;


      ALTER SYSTEM SET local_listener='DB_NAME_LOCAL' SCOPE=spfile;


SELECT ARCH.THREAD# "Thread",
       ARCH.SEQUENCE# "Last Sequence Received",
       APPL.SEQUENCE# "Last Sequence Applied",
       (ARCH.SEQUENCE# - APPL.SEQUENCE#) "Difference"
FROM (SELECT THREAD#, SEQUENCE# FROM V$ARCHIVED_LOG
      WHERE (THREAD#, FIRST_TIME) IN (SELECT THREAD#, MAX(FIRST_TIME) FROM V$ARCHIVED_LOG GROUP BY THREAD#)) ARCH,
     (SELECT THREAD#, SEQUENCE# FROM V$LOG_HISTORY
      WHERE (THREAD#, FIRST_TIME) IN (SELECT THREAD#, MAX(FIRST_TIME) FROM V$LOG_HISTORY GROUP BY THREAD#)) APPL
WHERE ARCH.THREAD# = APPL.THREAD#
ORDER BY 1;

      alter system set local_listener=TEST_LOCAL scope=both sid='TEST1';
      alter system set remote_listener=TESTD_REMOTE scope=both sid='TEST';
      set linesize 150
      set pagesize 500
      col name for a70
      col "COMPLETION_TIME" for a25
select NAME,ARCHIVED,APPLIED,DELETED,STATUS,REGISTRAR,to_char(COMPLETION_TIME,'DD-MON-YYYY HH24:MI:SS') "COMPLETION_TIME" from v$archived_log where APPLIED='NO' order by name;
alter database register logfile '/oracle/archive/EPICPROD/fs01/EPICPROD.arch2_184958_809040320.dbf';
      Select thread#, sequence#, applied, to_char(first_time, 'mm/dd/yy hh24:mi:ss') first, to_char(next_time, 'mm/dd/yy hh24:mi:ss')
      next, to_char(completion_time, 'mm/dd/yy hh24:mi:ss') completion from v$archived_log order by first_time;


ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DELAY 240 DISCONNECT FROM SESSION;   -- start MRP in 11g

ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING ARCHIVED LOGFILE DISCONNECT;   -- start MRP in 12c

Command to check throughput:
alter session set nls_date_format = 'dd-mm-yyyy hh24:mi:ss';
select to_char(start_time) "Recover_start",
       to_char(item)||' = '||to_char(sofar)||' '||to_char(units)||' '||to_char(timestamp) "Values"
from v$recovery_progress
where start_time = (select max(start_time) from v$recovery_progress);


Check MRP process and lag for DR:
      select process,client_process,status,thread#,sequence#,block#,blocks from v$managed_standby where PROCESS like '%MRP%';

      select thread#,max(sequence#),round((sysdate-max(first_time))*24,2) "Hours Behind" from v$archived_log where Applied='YES' group by thread#;

      DG Details:
      ++++++++++++
      show parameter dg;
      dgmgrl sys/<password>
      show configuration verbose

      Standby Redo Logs (SRL):

After the introduction of standby redo log files, Data Guard reads the content of the online redo logs and writes it directly to the standby redo logs, which are used for managed recovery.

Here come two new processes: the Network Server Async process (NSA) and the Network Server Sync process (NSS). Prior to 12c, the Log Writer Network Server process (LNS) was used instead of NSA and NSS.

If the database is configured in maximum performance mode, which is async replication, the NSA process reads the change data from the online redo logs and writes it to the standby redo log files.

If the database is in maximum protection or maximum availability mode, which is sync replication, the NSS process receives the change data directly from the LGWR process, and it is written to the standby redo log files.

If you have configured real-time apply for Data Guard, the changes are applied to the standby database immediately.

In case Data Guard is configured with a log delay, the standby redo log content is written to remote archive files and applied after the specified delay.

In case there is a lag, the FAL client requests the missing archivelogs from the primary, and the RFS process receives the required data to bring the standby database in sync with the primary.

      What & Why on Standby Redo Logs (SRL):

The standby redo log file is similar to an online redo log; the exception is that a standby redo log is used to store redo data that has been received from another database.

If the standby database is in maximum protection or maximum availability mode, then standby redo logs are mandatory. In those modes, the standby redo logs reduce the transaction wait time on standby commit. But even if your database is in maximum performance mode, standby redo logs can be used to minimize data loss.

Standby redo logs are needed if you are using real-time apply on the standby database. Standby redo logs are also used in cascaded standby databases, where the redo data is read from another standby database.

      Standby Redo Log Creation:

Just add the STANDBY clause to the ALTER DATABASE ADD LOGFILE command, so the command becomes "alter database add standby logfile size <size>;".
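A minimal sketch (the group number, diskgroup and size are assumptions, reusing the +TST_REDO diskgroup from the duplicate script above):

SQL> alter database add standby logfile group 4 ('+TST_REDO') size 500M;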

      Best Practices of Standby Redo Logs

Create the standby redo logs with exactly the same byte size as the online redo log files. If they can't be exactly the same size, make sure they are bigger than the online redo log files.

The standby redo log count should be equal to or higher than the online redo log count. The best practice is to have one more standby redo log group than online redo log groups.

When you create SRLs on the standby, create SRLs on the primary as well. They will normally never be used, but when you do a switchover, these files will be used after the primary database is converted to a standby database.

For an Oracle RAC primary database, create a number of SRLs equal to or higher than the number of ORLs across all primary instances. For example, if you have a 3-node RAC database with 4 ORLs in each thread, create 12 SRLs (3x4) on your standby.

Standby redo logs can be created even after the standby has been created. In this case, create the SRLs on the primary before creating the SRLs on the standby database.
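To verify what was created on either side:

SQL> select group#, thread#, bytes/1024/1024 mb, status from v$standby_log;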

Other related details can be found in the topics below:

      create standby
      switch-over