Wednesday, November 19, 2014

DB scripts

******************************
/home/satvvikv/act_wait_id.sql
******************************
-- Format the SCHEMANAME column for the report below.
col SCHEMANAME for a15
-- Active foreground sessions (excluding SYS) across all RAC instances whose
-- current wait has lasted more than half an hour, with the server process OS
-- pid (SPID). HRS_WAIT is substr'd to 3 characters purely for display width.
select gvs.INST_ID, gvs.SCHEMANAME, gvs.SID, gvp.SPID, gvs.PROCESS, gvs.SQL_ID, substr(gvs.EVENT,1,25) EVENT, substr(gvs.SECONDS_IN_WAIT/60/60,1,3) HRS_WAIT, gvs.MODULE from gv$session gvs, gv$process gvp where gvs.PADDR=gvp.ADDR and gvs.TYPE != 'BACKGROUND' and gvs.USERNAME != 'SYS' and gvs.STATUS = 'ACTIVE' and gvs.SECONDS_IN_WAIT/60/60 > 0.5 order by gvs.INST_ID, gvs.SECONDS_IN_WAIT
/
******************************
/home/satvvikv/act_wait_usr.sql
******************************
-- Active foreground sessions (excluding SYS/SYSTEM) that have been waiting
-- longer than &WAIT_TIME_IN_SECONDS seconds, ordered by wait length.
-- Fixed: the buffer-terminator '/' had been fused onto the separator banner
-- ('/******...'); SQL*Plus requires '/' alone on its own line to execute the
-- buffered statement.
select OSUSER, USERNAME, SID, PROCESS, SQL_ID, substr(EVENT,1,25) EVENT, SECONDS_IN_WAIT, MODULE from v$session where TYPE != 'BACKGROUND' and USERNAME not in('SYS','SYSTEM') and STATUS = 'ACTIVE' and SECONDS_IN_WAIT > &WAIT_TIME_IN_SECONDS order by SECONDS_IN_WAIT
/
******************************
/home/satvvikv/advisor.sql
******************************
-- Advisor findings for SYS-owned objects: task name, segment, partition,
-- object type and the advisor's message text, joined on task and object id.
select af.task_name, ao.attr2 segname, ao.attr3 partition, ao.type, af.message
from dba_advisor_findings af, dba_advisor_objects ao
where ao.task_id = af.task_id
and ao.object_id = af.object_id
and ao.OWNER = 'SYS';
******************************
/home/satvvikv/arch_hour.sql
******************************
-- Archived-redo volume in GB, pivoted into one column per hour of day
-- ("00".."23") plus a daily TOTAL, for log destination 1 over the last
-- 20 days. Each decode() adds the log size (BLOCKS*BLOCK_SIZE, converted to
-- GB) into the matching hour bucket.
-- NOTE(review): substr(...,1,3) on the 2-character HH24 string is a no-op
-- kept from the original; the decode comparison is what selects the bucket.
select
to_char(a.first_time,'YYYY-MM-DD') "Date ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'00',(BLOCKS*BLOCK_SIZE/1024/1024/1024),0)),'9999') "00 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'01',(BLOCKS*BLOCK_SIZE/1024/1024/1024),0)),'9999') "01 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'02',(BLOCKS*BLOCK_SIZE/1024/1024/1024),0)),'9999') "02 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'03',(BLOCKS*BLOCK_SIZE/1024/1024/1024),0)),'9999') "03 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'04',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "04 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'05',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "05 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'06',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "06 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'07',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "07 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'08',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "08 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'09',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "09 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'10',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "10 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'11',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "11 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'12',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "12 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'13',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "13 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'14',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "14 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'15',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "15 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'16',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "16 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'17',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "17 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'18',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "18 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'19',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "19 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'20',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "20 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'21',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "21 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'22',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "22 ",
to_char(sum(decode(substr(to_char(a.first_time,'HH24'),1,3),'23',BLOCKS*BLOCK_SIZE/1024/1024/1024,0)),'9999') "23 ",
to_char(sum(BLOCKS*BLOCK_SIZE/1024/1024/1024),'9999999.99') "TOTAL"
from v$archived_log a
where dest_id=1 and
trunc(a.first_time) >= trunc(sysdate-19)
group by to_char(a.first_time,'YYYY-MM-DD')
order by to_char(a.first_time,'YYYY-MM-DD');
******************************
/home/satvvikv/asw.sql
******************************
SET LINES 188 PAGES 1000
COL EVENT FOR A33
COL P2TEXT FOR A14
COL P1TEXT FOR A25
COL P3TEXT FOR a25
COL SID FOR 999999
COL STATE FOR A10 TRUNC
COL USERNAME FOR A15
-- Current wait event, wait state and wait parameters (P1/P2) for every
-- active foreground session, ordered by event name.
-- NOTE(review): V$PROCESS is joined but no column from it is selected; the
-- join only restricts output to sessions that have a server process.
SELECT A.USERNAME, A.SQL_ID, EVENT, SECONDS_IN_WAIT SECS_WAIT, STATE, WAIT_TIME, P1, P2, P1TEXT, P2TEXT
FROM V$SESSION A, V$PROCESS B WHERE A.PADDR = B.ADDR
AND A.STATUS = 'ACTIVE' AND A.USERNAME IS NOT NULL
AND A.TYPE != 'BACKGROUND'
ORDER BY 3;
******************************
/home/satvvikv/asw_history.sql
******************************
COL TOTAL_WAIT FOR 999999999999999;
-- Total wait time per non-idle event from Active Session History.
-- Fixed: V$EVENT_NAME.WAIT_CLASS stores 'Idle' (mixed case), so the previous
-- filter <> 'IDLE' excluded nothing and idle events polluted the output; the
-- sibling gasw_history.sql already uses the correct literal 'Idle'.
SELECT EN.NAME WAIT_EVENT, SUM(ASH.WAIT_TIME + ASH.TIME_WAITED) TOTAL_WAIT FROM V$ACTIVE_SESSION_HISTORY ASH, V$EVENT_NAME EN WHERE ASH.EVENT_ID = EN.EVENT_ID AND EN.WAIT_CLASS <> 'Idle' GROUP BY EN.NAME ORDER BY 2 DESC;
******************************
/home/satvvikv/bind_cap.sql
******************************
-- Captured bind variable names and values ('name|value') for one SQL_ID.
-- NOTE(review): v$sql_bind_capture is sampled, not continuous - values may
-- be missing or stale for some binds.
select  name||'|'|| value_string
from v$sql_bind_capture
where sql_id='&sqlid'
/
******************************
/home/satvvikv/check_stale_stats.sql
******************************
-- - - - - - - - - - - - - - - - - - - - - - - - - Script begins here - - - - - - - - - - - - - - - - - - - - - - - - - -
--  NAME:  CHECK_STALE_STATS.SQL
--   Execute as SYS as sysdba
-- ----------------------------------------------------------------------------------------------------------------
-- AUTHOR: 
--    Raja Ganesh - Oracle Support Services - DataServer Group
--    Copyright 2008, Oracle Corporation     
-- -----------------------------------------------------------------------------------------------------------------
-- PURPOSE:
-- This script is an automated way to deal with stale statistics
-- operations that are required to be done as part of manual
-- upgrade OR when reported by DBUA.
--
-- This script will work in both Windows and Unix platforms from database
-- version 9.2 or higher.
-- ------------------------------------------------------------------------------------------------------------------
-- DISCLAIMER:
--    This script is provided for educational purposes only. It is NOT 
--    supported by Oracle World Wide Technical Support.
--    The script has been tested and appears to work as intended.
--    You should always run new scripts on a test instance initially.
-- -------------------------------------------------------------------------------------------------------------------
SET FEEDBACK OFF
SET LINESIZE 250
SET SERVEROUTPUT ON
DECLARE
-- Variables declared
P_OTAB DBMS_STATS.OBJECTTAB;
MCOUNT NUMBER := 0;
P_VERSION VARCHAR2(10);
-- Cursor over every schema registered in DBA_REGISTRY
CURSOR c1
IS
SELECT distinct schema
FROM dba_registry
ORDER by 1;
-- Beginning of the anonymous block
BEGIN
-- Verifying version from v$instance
SELECT version INTO p_version FROM v$instance;
DBMS_OUTPUT.PUT_LINE(chr(13));
-- Loop 1: for each registered schema, list objects with stale statistics
FOR x in c1
  LOOP
 DBMS_STATS.GATHER_SCHEMA_STATS(OWNNAME=>x.schema,OPTIONS=>'LIST STALE',OBJLIST=>p_otab);
-- Loop 2: count the stale objects, skipping internal dictionary tables
 FOR i in 1 .. p_otab.count
   LOOP
  IF p_otab(i).objname NOT LIKE 'SYS_%'
   AND p_otab(i).objname NOT IN ('CLU$','COL_USAGE$','FET$','INDPART$',
            'MON_MODS$','TABPART$','HISTGRM$',
            'MON_MODS_ALL$',
            -- fixed: 'IND$' was garbled as 'IN $' in the original list;
            -- IND$ is the SYS index base table, matching TAB$/CLU$ above
            'HIST_HEAD$','IND$','TAB$',
            'WRI$_OPTSTAT_OPR','PUIU$DATA',
            'XDB$NLOCKS_CHILD_NAME_IDX',
            'XDB$NLOCKS_PARENT_OID_IDX',
            'XDB$NLOCKS_RAWTOKEN_IDX', 'XDB$SCHEMA_URL',
            'XDBHI_IDX', 'XDB_PK_H_LINK')
  THEN
-- Incrementing count for each object found with stale stats
   mcount := mcount + 1;
  END IF;
-- End of Loop 2
   END LOOP;
-- Displays no stale statistics, if count is 0
  IF mcount!=0
   THEN
-- Displays Schema with stale stats if count is greater than 0
    DBMS_OUTPUT.PUT_LINE(chr(13));
    DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------------------------');
    DBMS_OUTPUT.PUT_LINE('-- '|| x.schema || ' schema contains stale statistics use the following to gather the statistics '||'--');
    DBMS_OUTPUT.PUT_LINE('-------------------------------------------------------------------------------------------------------');
   
-- Displays Command to be executed if schema with stale statistics is found depending on the version.
    IF SUBSTR(p_version,1,5) in ('8.1.7','9.0.1','9.2.0')
   THEN
    DBMS_OUTPUT.PUT_LINE(chr(13));
    DBMS_OUTPUT.PUT_LINE('EXEC DBMS_STATS.GATHER_SCHEMA_STATS('''||x.schema||''',OPTIONS=>'''||'GATHER STALE'||''', ESTIMATE_PERCENT  => DBMS_STATS.AUTO_SAMPLE_SIZE, METHOD_OPT => '''||'FOR ALL COLUMNS SIZE AUTO'||''', CASCADE => TRUE);');
    ELSIF SUBSTR(p_version,1,6) in ('10.1.0','10.2.0','11.1.0','11.2.0')
   THEN
    DBMS_OUTPUT.PUT_LINE(chr(13));
-- NOTE(review): GATHER_DICTIONARY_STATS's first positional parameter is
-- comp_id, not a schema name - confirm the emitted command against the
-- DBMS_STATS documentation before running it.
    DBMS_OUTPUT.PUT_LINE('EXEC DBMS_STATS.GATHER_DICTIONARY_STATS('''||x.schema||''',OPTIONS=>'''||'GATHER STALE'||''', ESTIMATE_PERCENT  => DBMS_STATS.AUTO_SAMPLE_SIZE, METHOD_OPT => '''||'FOR ALL COLUMNS SIZE AUTO'||''', CASCADE => TRUE);');
    ELSE
    DBMS_OUTPUT.PUT_LINE(chr(13));
    DBMS_OUTPUT.PUT_LINE('Version is '||p_version);
    END IF;
  ELSE
    DBMS_OUTPUT.PUT_LINE('-- There are no stale statistics in '|| x.schema || ' schema.');
    DBMS_OUTPUT.PUT_LINE(chr(13));
  END IF;
-- Reset count to 0 before the next schema.
   mcount := 0;
-- End of Loop 1
  END LOOP;
END;
/
SET FEEDBACK ON
-- - - - - - - - - - - - - - - - - - - - - - - - - Script ends here - - - - - - - - - - - - - - - - - - - - - - - - - -
******************************
/home/satvvikv/ctsche.sql
******************************
-- Last-analyzed timestamps for the CTSCHENNAI BOB/workflow tables.
-- Fixed: removed duplicate IN-list entries (BOB_OUTWARD and PDBFOLDER each
-- appeared twice) and detached the script-separator banner that had been
-- fused onto the terminating ');'.
 select OWNER,TABLE_NAME,LAST_ANALYZED from dba_tables where owner='CTSCHENNAI' and TABLE_NAME in
('BOB_INWARD','BOB_INWARD_UPDATESTATUS','BOB_OUTWARD','BOB_INWARD_HISTORY','BOB_OUTWARD_HISTORY','HISTORYROUTELOGTABLE',
'PDBFOLDER','WORKDONETABLE','WORKLISTTABLE','WFMESSAGEINPROCESSTABLE','CURRENTROUTELOGTABLE','ISDOC','ISVOLBLOCK',
'PDBDOCUMENT','PENDINGWORKLISTTABLE','PROCESSINSTANCETABLE','BOB_COREDATAMATCHING','WORKWITHPSTABLE','WORKINPROCESSTABLE','USRCHIBANK');
******************************
/home/satvvikv/ctsmum.sql
******************************
-- Shows whether each CTS table was analyzed today ('COMPLETE') or not
-- ('INCOMPLETE').
-- Fixed: the IN list contained ',PDBFOLDER' (a stray comma INSIDE the string
-- literal, so PDBFOLDER never matched) plus two duplicate PDBFOLDER entries.
-- Also replaced the NLS_DATE_FORMAT-dependent substr(date,1,10) comparison
-- with an explicit trunc() equality, which is session-settings independent.
select owner,table_name,last_analyzed, decode(trunc(last_analyzed), trunc(sysdate), 'COMPLETE', 'INCOMPLETE') "ANALYZE STATUS" from dba_tables where table_name in
('BOB_INWARD','BOB_INWARD_UPDATESTATUS','BOB_OUTWARD','BOB_INWARD_HISTORY',
'BOB_OUTWARD_HISTORY','HISTORYROUTELOGTABLE','PDBFOLDER','WORKDONETABLE',
'WORKLISTTABLE','WFMESSAGEINPROCESSTABLE','ISDOC',
'ISVOLBLOCK','PDBDOCUMENT','PENDINGWORKLISTTABLE',
'PROCESSINSTANCETABLE','BOB_COREDATAMATCHING','WORKWITHPSTABLE',
'WORKINPROCESSTABLE','USRCHIBANK','CURRENTROUTELOGTABLE') order by 3;
******************************
/home/satvvikv/da.sql
******************************
-- Historical execution plan(s) for a SQL_ID, read from the AWR repository.
select * from table ( dbms_xplan.display_awr('&sql_id'))
/
******************************
/home/satvvikv/datafiles.sql
******************************
COL FILE_NAME FOR A90
-- All datafiles with size in MB, status and autoextend flag, by file id.
SELECT FILE_ID, FILE_NAME, TABLESPACE_NAME, BYTES/1024/1024 "SIZE_IN_MB", STATUS, AUTOEXTENSIBLE FROM DBA_DATA_FILES ORDER BY 1;
******************************
/home/satvvikv/dc.sql
******************************
-- Execution plan for a SQL_ID from the shared pool (cursor cache).
select * from table ( dbms_xplan.display_cursor('&sql_id'))
/
******************************
/home/satvvikv/dg.sql
******************************
-- ASM diskgroup space summary, with all MB figures converted to GB.
select GROUP_NUMBER,NAME,STATE,TOTAL_MB/1024 TOTAL_GB,FREE_MB/1024 FREE_GB,HOT_USED_MB/1024 HOT_USED_GB, COLD_USED_MB/1024 COLD_USED_GB,REQUIRED_MIRROR_FREE_MB/1024 REQUIRED_MIRROR_FREE_GB,USABLE_FILE_MB/1024  USABLE_FILE_GB from v$asm_diskgroup
/
******************************
/home/satvvikv/dict.sql
******************************
col comments for a70
-- Search the data dictionary catalogue (DICT) for view names matching a
-- case-insensitive pattern.
select * from dict
 where table_name like upper('%&ENTER%');
******************************
/home/satvvikv/dict_stats.sql
******************************
-- Gather optimizer statistics for the data dictionary schemas.
exec dbms_stats.gather_dictionary_stats;
******************************
/home/satvvikv/dips.sql
******************************
-- Last-analyzed timestamps for index partitions belonging to partitioned
-- indexes on the tables behind the listed synonyms, newest-analyzed last.
select a.table_name,b.INDEX_NAME,b.INDEX_OWNER,b.PARTITION_NAME,b.LAST_ANALYZED from dba_ind_partitions b,dba_indexes a where b.INDEX_NAME
in (select INDEX_NAME from dba_part_indexes where TABLE_NAME in (select table_name from dba_synonyms
where synonym_name in ('DTD','DTH','TDTD','TDT','LGI','LGT','ATA','ADT','GUP','SOD','ETH','ETD','OTT','OCS','OCP','OCI','ICP'))) and b.index_name=a.index_name order by 5 desc
/
******************************
/home/satvvikv/dis.sql
******************************
-- Last-analyzed timestamps for TBAADM indexes on key audit/transaction tables.
select index_name,table_name,last_analyzed from dba_indexes where owner='TBAADM' and table_name in ('ACCT_TURN_OVER_TABLE','SQLDBA_AUDIT_TABLE','PRINT_QUEUE_TABLE','ORG_TRAN_TABLE','LOGIN_VAL_ITEMS_TABLE','BUSINESS_TRAN_TABLE')
/
******************************
/home/satvvikv/dtps.sql
******************************
-- Last-analyzed timestamps and GLOBAL_STATS flag for table partitions of the
-- tables behind the listed synonyms, most recently analyzed first.
select TABLE_OWNER,TABLE_NAME,PARTITION_NAME,LAST_ANALYZED,GLOBAL_STATS from dba_tab_partitions where TABLE_NAME in (select table_name from dba_synonyms where synonym_name in ('DTD','DTH','TDTD','TDT','LGI','LGT','ATA','ADT','GUP','SOD','ETH','ETD','OTT','OCS','OCP','OCI','ICP')) order by 4 desc;
******************************
/home/satvvikv/dts.sql
******************************
-- Last-analyzed timestamps for key TBAADM audit/transaction tables.
-- Fixed: 'PRINT_QUEUE_TABLE' was split across two physical lines, so the
-- string literal contained an embedded newline and never matched any table
-- (compare dis.sql, where the same list has the name intact).
select table_name,last_analyzed from dba_tables where owner='TBAADM' and table_name in ('ACCT_TURN_OVER_TABLE','SQLDBA_AUDIT_TABLE','PRINT_QUEUE_TABLE','ORG_TRAN_TABLE','LOGIN_VAL_ITEMS_TABLE','BUSINESS_TRAN_TABLE')
/
******************************
/home/satvvikv/eglc.sql
******************************
COL MODULE FOR A20 TRUNC
COL INST_ID FOR 9999999
COL USERNAME FOR A20 TRUNC
COL SID FOR 99999999
COL MACHINE FOR A30 TRUNC
COL LAST_CALL_ET FOR 99999999
COL OPTIMIZER_COST FOR 999999999999999999999
COL OSUSER FOR A15
-- Active foreground sessions across all RAC instances, with the optimizer
-- cost of the current statement and a per-instance count of sessions running
-- the same SQL_ID, ordered by cost then elapsed call time.
SELECT A.INST_ID,A.SID,A.SQL_ID,B.OPTIMIZER_COST,(SELECT COUNT(1) FROM GV$SESSION WHERE SQL_ID=A.SQL_ID AND INST_ID=A.INST_ID) SQL_COUNT, A.USERNAME,A.MACHINE,A.OSUSER,A.MODULE,A.LAST_CALL_ET/60 MINS FROM GV$SESSION A , GV$SQLAREA B WHERE A.SQL_ID=B.SQL_ID AND A.INST_ID=B.INST_ID AND A.STATUS = 'ACTIVE' AND A.USERNAME IS NOT NULL AND A.TYPE != 'BACKGROUND' ORDER BY 4,LAST_CALL_ET;
******************************
/home/satvvikv/eglc_d.sql
******************************
COL MODULE FOR A20 TRUNC;
COL INST_ID FOR 9999999;
COL USERNAME FOR A10;
COL OSUSER FOR A12;
COL MACHINE FOR A15 TRUNC;
COL LAST_CALL_ET FOR 99999999;
COL EVENT FOR A35 TRUNC;
-- Drill-down for one SQL_ID across all RAC instances: who is running it,
-- from where, current wait event and elapsed/wait minutes.
-- Fixed: the join to GV$SQLAREA now also matches INST_ID (as eglc.sql does);
-- without it every session row was duplicated once per instance that had the
-- cursor cached. Also corrected the 'mINS' alias typo.
SELECT A.INST_ID, A.SQL_ID, (SELECT COUNT(1) FROM GV$SESSION WHERE SQL_ID=A.SQL_ID) SQL_COUNT, A.OSUSER, A.USERNAME, A.MACHINE, A.MODULE, A.EVENT, A.LAST_CALL_ET/60 MINS, A.SECONDS_IN_WAIT/60 WAIT_MINS FROM GV$SESSION A , GV$SQLAREA B WHERE A.SQL_ID=B.SQL_ID AND A.INST_ID=B.INST_ID AND A.STATUS = 'ACTIVE' AND A.USERNAME IS NOT NULL AND A.TYPE != 'BACKGROUND' AND A.SQL_ID = '&SQL_ID' ORDER BY 3,LAST_CALL_ET;

******************************
/home/satvvikv/elc.sql
******************************
COL MODULE FOR A20 TRUNC
COL USERNAME FOR A20 TRUNC
COL SID FOR 99999999
COL MACHINE FOR A30 TRUNC
COL LAST_CALL_ET FOR 99999999
COL OPTIMIZER_COST FOR 999999999999999999999
COL OSUSER FOR A15
SELECT A.SID,A.SQL_ID,B.OPTIMIZER_COST,(SELECT COUNT(1) FROM V$SESSION WHERE SQL_ID=A.SQL_ID) SQL_COUNT, A.OSUSER, A.USERNAME,A.MACHINE,A.MODULE,A.LAST_CALL_ET/60 MINs FROM V$SESSION A , V$SQLAREA B WHERE A.SQL_ID=B.SQL_ID AND A.STATUS = 'ACTIVE' AND A.USERNAME IS NOT NULL AND A.TYPE != 'BACKGROUND' ORDER BY 3,LAST_CALL_ET;
******************************
/home/satvvikv/elc_d.sql
******************************
col module for a20 trunc
col username for a20 trunc
col schemaname for a20 trunc
col sid for 99999999
col last_call_et for 99999999
col osuser for a15
-- Drill-down for one SQL_ID on the local instance. Note: unlike elc.sql this
-- reports elapsed call time in HOURS (last_call_et/60/60).
select a.sid,a.sql_id,b.optimizer_cost,(select count(1) from v$session where sql_id=a.sql_id) sql_count, a.osuser, a.username,a.machine,a.module,a.last_call_et/60/60 Hours from v$session a , v$sqlarea b where a.sql_id=b.sql_id and a.status = 'ACTIVE' and a.username is not null and a.type != 'BACKGROUND' and a.sql_id = '&SQL_ID' order by 3,last_call_et;
******************************
/home/satvvikv/event.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
BREAK ON SQL_ID dup skip page
-- Active foreground sessions currently waiting on the event named by &EVENT,
-- with server-process SPID, statement cost (scalar subquery against
-- V$SQLAREA) and a count of sessions sharing the same SQL hash value.
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM V$SESSION VS, V$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.EVENT='&EVENT' ORDER BY 3,8;
******************************
/home/satvvikv/finalhw.sql
******************************
-- Count of sessions grouped by their final blocker (session, instance):
-- a quick RAC-wide view of the root blockers in a hang chain.
select final_blocking_Session,final_blocking_instance ,count(1) from gv$session group by final_blocking_Session,final_blocking_instance;
******************************
/home/satvvikv/ftext.sql
******************************
set head off;
-- Allow the full CLOB SQL text to print without truncation.
set long 99999999
-- Complete text of a statement from the local shared pool.
select SQL_FULLTEXT from v$sqlarea where SQL_ID = '&SQL_ID';
set head on;

******************************
/home/satvvikv/gact_wait_usr.sql
******************************
-- RAC-wide variant of act_wait_usr.sql: active foreground sessions (excluding
-- SYS/SYSTEM) waiting longer than &WAIT_TIME_IN_SECONDS seconds.
select gvs.INST_ID, gvs.OSUSER, gvs.USERNAME, gvs.SID, gvs.PROCESS, gvs.SQL_ID, substr(gvs.EVENT,1,25) EVENT, gvs.SECONDS_IN_WAIT, gvs.MODULE from gv$session gvs where gvs.TYPE != 'BACKGROUND' and gvs.USERNAME not in ('SYS','SYSTEM') and gvs.STATUS = 'ACTIVE' and gvs.SECONDS_IN_WAIT > &WAIT_TIME_IN_SECONDS order by gvs.INST_ID, gvs.SECONDS_IN_WAIT
/
******************************
/home/satvvikv/gasw.sql
******************************
SET LINES 188 PAGES 1000
COL EVENT FOR A33
COL P2TEXT FOR A14
COL P1TEXT FOR A25
COL P3TEXT FOR A25
COL SID FOR 999999
COL STATE FOR A10 TRUNC
COL USERNAME FOR A15
-- RAC-wide variant of asw.sql: current wait event, state and wait parameters
-- for every active foreground session; GV$SESSION/GV$PROCESS are correctly
-- correlated on INST_ID as well as PADDR.
SELECT A.INST_ID, A.USERNAME, A.SQL_ID, EVENT, SECONDS_IN_WAIT SECS_WAIT, STATE, WAIT_TIME, P1, P2, P1TEXT, P2TEXT
FROM GV$SESSION A, GV$PROCESS B WHERE A.PADDR = B.ADDR
AND A.INST_ID=B.INST_ID
AND A.STATUS = 'ACTIVE' AND A.USERNAME IS NOT NULL
AND A.TYPE != 'BACKGROUND'
ORDER BY 3;
******************************
/home/satvvikv/gasw_history.sql
******************************
col TOTAL_WAIT for 999999999999999;
-- RAC-wide total wait time per non-idle event from Active Session History.
-- Note: 'Idle' (mixed case) is the correct literal for WAIT_CLASS here.
select en.name WAIT_EVENT, sum(ash.wait_time + ash.time_waited) TOTAL_WAIT from gv$active_session_history ash, gv$event_name en where ash.event_id = en.event_id and en.wait_class <> 'Idle' group by en.name order by 2 desc
/
******************************
/home/satvvikv/gather_index_stats.sql
******************************
-- Emits a ready-to-run dbms_stats.gather_index_stats command for one index.
select 'exec dbms_stats.gather_index_stats('''||OWNER||''','''||INDEX_NAME||''', granularity => ''AUTO'', DEGREE => 32);' from dba_indexes  where owner = '&OWNER' and index_name = '&INDEX_NAME';
******************************
/home/satvvikv/gather_table_stats.sql
******************************
-- Emits a ready-to-run dbms_stats.gather_table_stats command for one table.
select 'exec dbms_stats.gather_table_stats('''||OWNER||''','''||TABLE_NAME||''',granularity => ''AUTO'',cascade => TRUE, DEGREE => 32);' from dba_tables  where owner = '&OWNER' and table_name = '&TABLE_NAME';
******************************
/home/satvvikv/gevent.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
BREAK ON SQL_ID dup skip page
-- RAC-wide variant of event.sql: sessions waiting on the event &EVENT.
-- NOTE(review): the SQL_COST/SQL_COUNT scalar subqueries read the
-- single-instance V$SQLAREA/V$SESSION while the outer query is GV$ - on RAC
-- they only see the instance this script runs on; confirm whether that is
-- intended (cf. eglc.sql, which correlates on INST_ID).
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM GV$SESSION VS, GV$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.EVENT='&EVENT' ORDER BY 3,8;
******************************
/home/satvvikv/gftext.sql
******************************
set head off;
-- Allow the full CLOB SQL text to print without truncation.
set long 99999999
-- Complete text of a statement from the shared pool of every RAC instance
-- (may return one row per instance holding the cursor).
select SQL_FULLTEXT from gv$sqlarea where SQL_ID = '&SQL_ID';
set head on;

******************************
/home/satvvikv/ghw.sql
******************************
col HOLDER_SQL_PREVSQL_PROCESS for a40
col MODULE for a30
col OSUSER for a10
col USERNAME for a10
-- RAC-wide blocker/waiter map: each waiting session with its holder's SID,
-- the holder's current/previous SQL_ID and client process.
-- Fixed: the holder lookup now also matches blocking_instance; SIDs are only
-- unique per instance, so matching on SID alone could pick up an unrelated
-- session from another node as the holder.
select inst_id,sid WAITER_SID, blocking_session HOLDER_SID ,
(select sql_id||'|'||prev_sql_id ||'|'||process from gv$session where sid=sess.blocking_session and inst_id=sess.blocking_instance and rownum <=1) holder_sql_prevsql_process ,
sql_id WAITER_sqlid,seconds_in_wait secw, username,module,osuser,last_call_et/60 wait_mins
from gv$session sess
WHERE  blocking_session IS NOT NULL
order by 2;
******************************
/home/satvvikv/gkill_id.sql
******************************
-- RAC-aware kill-statement generator for all sessions running a SQL_ID; the
-- '@'||INST_ID clause routes each KILL to the instance that owns the session.
SELECT 'ALTER SYSTEM KILL SESSION '''||SID||','||SERIAL#||',@'||INST_ID||''' IMMEDIATE;' FROM GV$SESSION  WHERE SQL_ID='&SQL_ID';
******************************
/home/satvvikv/glc.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
-- RAC-wide list of active foreground sessions with a SQL_ID, their statement
-- cost and count of sessions sharing the hash value.
-- NOTE(review): the SQL_COST/SQL_COUNT scalar subqueries read V$SQLAREA /
-- V$SESSION (local instance only) while the outer query is GV$ - confirm
-- whether cross-instance costs/counts were intended.
SELECT /*SATVVIK*/ VS.INST_ID, VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM GV$SESSION VS, GV$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID IS NOT NULL ORDER BY 4,3,11;
******************************
/home/satvvikv/glc_d.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
-- RAC-wide drill-down for one SQL_ID: which sessions are running it, from
-- which machine/module, with statement cost and logon time.
-- NOTE(review): the scalar subqueries use local V$SQLAREA/V$SESSION while
-- the outer query is GV$ - same caveat as glc.sql; also INST_ID is not
-- selected here, unlike glc.sql.
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM GV$SESSION VS, GV$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID = '&SQL_ID' ORDER BY 10,3,11;
******************************
/home/satvvikv/glc_mach.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
-- Same listing as glc.sql but page-broken per client MACHINE (BREAK ON) and
-- ordered by machine first, to group sessions by originating host.
BREAK ON MACHINE dup skip page
SELECT /*SATVVIK*/ VS.INST_ID, VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM GV$SESSION VS, GV$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID IS NOT NULL ORDER BY 10,3,11;
******************************
/home/satvvikv/gsql.sql
******************************
-- Find SQL_IDs of currently-active (non-SYS) statements whose shared-pool
-- text contains the given fragment.
select distinct gv.sql_id from gv$session gv, gv$sql gs where gv.sql_id=gs.sql_id and gs.sql_text like '%&SAMPLE_SQL_TEXT%' and gv.status= 'ACTIVE' and gv.username <> 'SYS'
/
******************************
/home/satvvikv/gv_lock.sql
******************************
-- NOTE(review): "_hash_join_enabled" is an undocumented (underscore)
-- parameter; setting it should normally only be done under Oracle Support
-- guidance - confirm why it is needed before this lookup query.
alter session set "_hash_join_enabled"=TRUE;
-- Names of all objects currently locked anywhere in the cluster.
select object_id, object_name from dba_objects where object_id in (select object_id from gv$locked_object);
******************************
/home/satvvikv/high_cpu.sql
******************************
-- sessions with highest CPU consumption
-- Reports cumulative session CPU (centiseconds/100 = seconds) for sessions
-- that were active within the last half hour and logged on within 4 hours.
-- NOTE(review): ORDER BY st.value is ascending, so the heaviest consumers
-- appear LAST (at the bottom of scrolling SQL*Plus output) - presumably
-- intentional; confirm before changing to DESC.
SELECT s.sid, s.serial#, p.spid as "OS PID",s.username, s.module, st.value/100 as "CPU sec"
FROM v$sesstat st, v$statname sn, v$session s, v$process p
WHERE sn.name = 'CPU used by this session' -- CPU
AND st.statistic# = sn.statistic#
AND st.sid = s.sid
AND s.paddr = p.addr
AND s.last_call_et < 1800 -- active within last 1/2 hour
AND s.logon_time > (SYSDATE - 240/1440) -- sessions logged on within 4 hours
ORDER BY st.value;
******************************
/home/satvvikv/hotblock.sql
******************************
COL OBJECT_NAME FOR A60;
-- Hot objects/blocks from ASH for a given wait event: session counts grouped
-- by SQL_ID, object, file# and block#. The outer join keeps ASH rows whose
-- CURRENT_OBJ# has no DBA_OBJECTS match (NVL falls back to the raw obj#).
SELECT INST_ID, COUNT(*), SQL_ID, NVL(O.OBJECT_NAME, ASH.CURRENT_OBJ#) OBJECT_NAME, SUBSTR(O.OBJECT_TYPE, 0,10) OBJECT_TYPE, CURRENT_FILE# FILE_NUMBER, CURRENT_BLOCK# BLOCK_NUMBER FROM GV$ACTIVE_SESSION_HISTORY ASH, DBA_OBJECTS O WHERE EVENT LIKE '%&EVENT%' AND O.OBJECT_ID (+) = ASH.CURRENT_OBJ# GROUP BY INST_ID, SQL_ID, CURRENT_OBJ#, CURRENT_FILE#, CURRENT_BLOCK#, O.OBJECT_NAME, O.OBJECT_TYPE ORDER BY 1,2,4;
******************************
/home/satvvikv/hw.sql
******************************
col HOLDER_SQL_PREVSQL_PROCESS for a40
col MODULE for a30
col OSUSER for a10
col USERNAME for a10
-- Single-instance blocker/waiter map: each waiting session with its holder's
-- SID and the holder's current/previous SQL_ID and client process, ordered by
-- wait minutes (column 9).
select sid WAITER_SID, blocking_session HOLDER_SID,
(select sql_id||'|'||prev_sql_id ||'|'||process from v$session where sid=sess.blocking_session and rownum <=1) holder_sql_prevsql_process ,
sql_id WAITER_sqlid,seconds_in_wait secw, username,module,osuser,last_call_et/60 wait_mins
from v$session sess
WHERE  blocking_session IS NOT NULL
order by 9;
******************************
/home/satvvikv/indinfo.sql
******************************
-- Overview of every index on a table (case-insensitive table-name lookup).
COL OWNER FOR A20;
COL INDEX_NAME FOR A30;
COL INDEX_TYPE FOR A10;
COL TABLESPACE_NAME FOR A25;
COL DEGREE FOR A6
COL NUM_ROWS FOR 9999999999999;
SELECT owner,
       index_name,
       index_type,
       partitioned,
       compression,
       tablespace_name,
       num_rows,
       last_analyzed,
       degree,
       status
  FROM dba_indexes
 WHERE table_name = UPPER('&TABLE_NAME');
******************************
/home/satvvikv/kill_id.sql
******************************
SELECT 'ALTER SYSTEM KILL SESSION '''||SID||','||SERIAL#||''' IMMEDIATE;' FROM GV$SESSION  WHERE SQL_ID='&SQL_ID';
******************************
/home/satvvikv/killi_bobind.sql
******************************
-- The PROMPTs below simply remind the operator of commonly-locked objects.
PROMPT DAILY_TRAN_HEADER_TABLE
PROMPT DAILY_TRAN_DETAIL_TABLE
PROMPT GENERAL_ACCT_MAST_TABLE
-- Generate kill statements for INACTIVE sessions that hold a lock on
-- &OBJECT_NAME and logged on more than &TIME_IN_HRS hours ago.  Review the
-- generated commands before running them.
SELECT 'ALTER SYSTEM KILL SESSION ' ||''''||SID||','||' '||SERIAL#||''''||' '||'IMMEDIATE'||';' AS SESSION_KILL, LOGON_TIME, LAST_CALL_ET, SQL_ID, PREV_SQL_ID FROM V$SESSION WHERE SID IN (SELECT SESSION_ID  FROM  GV$LOCKED_OBJECT WHERE OBJECT_ID IN (SELECT OBJECT_ID FROM DBA_OBJECTS WHERE OBJECT_NAME='&OBJECT_NAME')) AND STATUS ='INACTIVE' AND LOGON_TIME < (SYSDATE - &TIME_IN_HRS/24) ORDER BY LOGON_TIME;
******************************
/home/satvvikv/killi_lock.sql
******************************
SELECT 'ALTER SYSTEM KILL SESSION ' ||''''||SID||','||' '||SERIAL#||''''||' '||'IMMEDIATE'||';' AS SESSION_KILL, LOGON_TIME, LAST_CALL_ET, SQL_ID, PREV_SQL_ID FROM V$SESSION WHERE SID IN (SELECT SESSION_ID  FROM  GV$LOCKED_OBJECT WHERE OBJECT_ID IN (SELECT OBJECT_ID FROM DBA_OBJECTS WHERE OBJECT_NAME='&OBJECT_NAME')) AND STATUS ='INACTIVE' AND LOGON_TIME < (SYSDATE - &TIME_IN_HRS/24) ORDER BY LOGON_TIME;
******************************
/home/satvvikv/lc.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
BREAK ON SQL_ID dup skip page
-- "Load check": one row per active foreground session that has a current
-- SQL_ID.  Scalar subqueries fetch the statement's optimizer cost and how
-- many sessions share the same SQL hash.  BREAK ON SQL_ID pages the output
-- per statement; ORDER BY 3,2,11 = sql_cost, sql_id, last_call_et.
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM V$SESSION VS, V$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID IS NOT NULL ORDER BY 3,2,11;
******************************
/home/satvvikv/lc_d.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
-- Drill-down variant of lc.sql: the same active-session report restricted to
-- a single &SQL_ID.  ORDER BY 10,3,11 = machine, sql_cost, last_call_et.
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM V$SESSION VS, V$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID = '&SQL_ID' ORDER BY 10,3,11;
******************************
/home/satvvikv/lc_mach.sql
******************************
COL MODULE FOR A40
COL OSUSER FOR A10
COL USERNAME FOR A8
SET PAGES 10000
COL SID FOR 99999999
COL SQL_CNT FOR 9999999
COL MACHINE FOR A10
COL LAST_CALL_ET FOR 999999
COL SPID  FOR A6
COL APP_ID FOR A6
BREAK ON MACHINE dup skip page
-- Machine-oriented variant of lc.sql: same active-session report but the
-- output breaks to a new page per client MACHINE.
-- ORDER BY 10,3,11 = machine, sql_cost, last_call_et.
SELECT /*SATVVIK*/ VP.SPID, VS.SQL_ID, (SELECT OPTIMIZER_COST FROM V$SQLAREA WHERE SQL_ID=VS.SQL_ID) SQL_COST, (SELECT COUNT(1) FROM V$SESSION WHERE SQL_HASH_VALUE=VS.SQL_HASH_VALUE) SQL_COUNT, VS.PROCESS APP_ID, VS.USERNAME, TO_CHAR(VS.LOGON_TIME,'DD-MM HH24:MI:SS') LOGON_TIME, VS.OSUSER, VS.MODULE, VS.MACHINE, VS.LAST_CALL_ET FROM V$SESSION VS, V$PROCESS VP WHERE VS.PADDR = VP.ADDR AND  VS.STATUS = 'ACTIVE' AND VS.USERNAME IS NOT NULL AND VS.TYPE <> 'BACKGROUND' AND VS.SQL_ID IS NOT NULL ORDER BY 10,3,11;
******************************
/home/satvvikv/lock.sql
******************************
SELECT (SELECT USERNAME FROM V$SESSION WHERE SID=A.SID) BLOCKER, A.SID, 'IS BLOCKING', (SELECT USERNAME FROM V$SESSION WHERE SID = B.SID) BLOCKEE,  B.SID FROM V$LOCK A, V$LOCK B WHERE A.BLOCK=1 AND B.REQUEST > 0 AND A.ID1=B.ID1 AND A.ID2=B.ID2;
******************************
/home/satvvikv/locki.sql
******************************
col session_kill for a60
-- Single-instance kill-statement generator: INACTIVE sessions holding a lock
-- on &TABLE_NAME, oldest logon first.  Review before executing the output.
select 'alter system kill session ' ||''''||sid||','||' '||serial#||''''||' '||'immediate'||';' as session_kill,LOGON_TIME,last_call_et,sql_id,prev_sql_id from v$session where sid in (select SESSION_ID  from  v$locked_object where OBJECT_ID in (select OBJECT_ID from dba_objects where OBJECT_NAME='&TABLE_NAME')) and status ='INACTIVE' order by LOGON_TIME;
******************************
/home/satvvikv/logfiles.sql
******************************
-- Redo log file members with their group, type and status.
COL MEMBER FOR A85
SELECT group#,
       member,
       type,
       status
  FROM v$logfile
 ORDER BY group#;
******************************
/home/satvvikv/lops_d.sql
******************************
-- AUTHOR: SATVVIK VISSWANATHAN
-- SCRIPT: Get Estimates of Long Running Operations By Operation Name
-- fix: NULLIF guards against rows with TOTALWORK = 0, which would raise
-- ORA-01476 (divisor is equal to zero) and kill the whole query; such rows
-- now show a NULL percent-complete instead.
SELECT OPNAME, ROUND(SOFAR/NULLIF(TOTALWORK,0)*100,2) PERCENT_COMPLETE, ELAPSED_SECONDS/60/60 ELAPSED_HRS, TIME_REMAINING/60/60 TIME_REMAINING_HRS from gv$session_longops where OPNAME = '&OPERATION_NAME';
******************************
/home/satvvikv/planc.sql
******************************
-- Plan-change history for one SQL_ID across AWR snapshots: executions,
-- average elapsed seconds and average logical I/O per execution, broken
-- down by snapshot / instance / plan hash value.
 select ss.snap_id, ss.instance_number node, begin_interval_time, s.sql_id, s.plan_hash_value,
 nvl(executions_delta,0) execs,
 (elapsed_time_delta/decode(nvl(executions_delta,0),0,1,executions_delta))/1000000 avg_etime,
 -- fix: the DECODE guard must test EXECUTIONS_DELTA (as avg_etime does), not
 -- BUFFER_GETS_DELTA - the intent is gets per execution with a safe divisor
 (buffer_gets_delta/decode(nvl(executions_delta,0),0,1,executions_delta)) avg_lio
 from DBA_HIST_SQLSTAT S,
DBA_HIST_SNAPSHOT SS
 where s.sql_id = nvl('&sql_id','4dqs2k5tynk61')
 and ss.snap_id = S.snap_id
 and ss.instance_number = S.instance_number
 and executions_delta > 0
 order by 1, 2, 3;
******************************
/home/satvvikv/redo.sql
******************************
SELECT THREAD#, GROUP#, MEMBERS, ARCHIVED, STATUS, SEQUENCE#, BYTES/1024/1024 "SIZE IN MB", TO_CHAR(FIRST_TIME, 'DD-MON-YY HH:MI:SS') "FIRST TIME" FROM V$LOG ORDER BY 2;
******************************
/home/satvvikv/repair.sql
******************************
-- Generate ALTER ... COMPILE statements for every INVALID object, with all
-- decoration stripped (head off, big page/line size) so the output can be
-- spooled and re-run as a script.  PACKAGE BODY needs the special
-- 'ALTER PACKAGE ... COMPILE BODY' form, hence the two DECODEs.
set time off
set timing off
set echo off
set head off
set pages 50000
set lines 200
set feedback on
select 'alter '||decode(object_type,'PACKAGE BODY','PACKAGE' ,object_type) ||' '||
 ltrim(rtrim(Object_name)) ||
 decode(object_type,'PACKAGE BODY',' compile body',' compile')||' ;'
 from dba_objects
 where status = 'INVALID';
-- restore the interactive session settings changed above
set time on
set timing on
set heading on

******************************
/home/satvvikv/resume.sql
******************************
select user_id, instance_id, status, name, error_msg, suspend_time, resume_time from dba_resumable;
******************************
/home/satvvikv/rman_progress.sql
******************************
-- RMAN backup/restore progress from v$session_longops: work done/remaining
-- in GB, elapsed and remaining minutes, and throughput.  COMPUTE lines add
-- totals (and a max of TIME_MIN) at the end of the report.
-- NOTE(review): the *8192 factor assumes an 8K database block size - confirm
-- against db_block_size before trusting the GB figures.
col opname for a35
break on CONTEXT on report
compute SUM of SOFAR_GB TOTALWORK_GB TIME_MIN on report
compute max of TIME_MIN  on report
compute sum of PENDING_GB on report
SELECT sid, opname, context, sofar*8192/1024/1024/1024 sofar_GB, totalwork*8192/1024/1024/1024 totalwork_GB, (totalwork-sofar)*8192/1024/1024/1024 pending_gb,
TIME_REMAINING/60 time_min, ELAPSED_SECONDS/60 elps_min, ELAPSED_SECONDS elpse_time,  round(sofar/totalwork*100, 2) compl ,
round(sofar*8192/1024/1024/ELAPSED_SECONDS, 2) speed_MB_per_sec
FROM v$session_longops
WHERE opname LIKE 'RMAN%'
/*AND opname NOT LIKE '%aggregate%'*/
AND totalwork != 0
AND sofar <> totalwork;
******************************
/home/satvvikv/seginfo.sql
******************************
-- Total size (GB) of a segment, per owner/type/tablespace
-- (case-insensitive segment-name lookup).
COL OWNER FOR A20;
COL SEGMENT_NAME FOR A35;
COL TABLESPACE_NAME FOR A35;
SELECT owner,
       segment_name,
       segment_type,
       tablespace_name,
       SUM(bytes)/1024/1024/1024 "SIZE IN GB"
  FROM dba_segments
 WHERE segment_name = UPPER('&SEGMENT_NAME')
 GROUP BY owner, segment_name, segment_type, tablespace_name
 ORDER BY owner;
******************************
/home/satvvikv/sgastat.sql
******************************
-- Report the size of the 'FileOpenBlock' allocation in the SGA, with a
-- timestamp banner.  Intended for non-interactive runs (cron / shell): the
-- trailing EXIT terminates the SQL*Plus session.
alter session set nls_date_format='DD-MM-YYYY HH24:MI:SS';
PROMPT ************************
select sysdate from dual;
PROMPT ************************
select POOL,NAME,round(BYTES/1024/1024,0) size_mb from v$sgastat where name='FileOpenBlock';
exit
******************************
/home/satvvikv/sql.sql
******************************
select distinct v.sql_id from v$session v, v$sql s where v.sql_id=s.sql_id and s.sql_text like '%&SAMPLE_SQL_TEXT%' and v.status= 'ACTIVE' and v.username <> 'SYS';
******************************
/home/satvvikv/sql_stat.sql
******************************
******************************
/home/satvvikv/sql_stats.sql
******************************
-- Per-snapshot AWR statistics for one SQL_ID: parse/fetch/IO totals plus
-- average elapsed seconds and average logical I/O per execution.
 select ss.snap_id, begin_interval_time, s.sql_id, s.PARSE_CALLS_TOTAL, s.FETCHES_TOTAL, s.PHYSICAL_READ_BYTES_TOTAL,s.IOWAIT_TOTAL,
 nvl(executions_delta,0) execs,
 (elapsed_time_delta/decode(nvl(executions_delta,0),0,1,executions_delta))/1000000 avg_etime,
 -- fix: the DECODE guard must test EXECUTIONS_DELTA (as avg_etime does), not
 -- BUFFER_GETS_DELTA - the intent is gets per execution with a safe divisor
 (buffer_gets_delta/decode(nvl(executions_delta,0),0,1,executions_delta)) avg_lio
 from DBA_HIST_SQLSTAT S,
DBA_HIST_SNAPSHOT SS
 where s.sql_id = nvl('&sql_id','4dqs2k5tynk61')
 and ss.snap_id = S.snap_id
 and ss.instance_number = S.instance_number
 and executions_delta > 0
 order by 1, 2, 3;
******************************
/home/satvvikv/sqld.sql
******************************
select sql_id,disk_Reads,buffer_gets,buffer_gets/executions,executions from v$sqlarea where sql_id = '&sqlid';
******************************
/home/satvvikv/start.sql
******************************
-- Login/startup script: sets the SQL*Plus prompt to user@instance, turns on
-- timing, widens the display, and fixes the session date format.
-- NEW_VALUE captures the query result into the &instance_name substitution
-- variable; NOPRINT suppresses the column from the query output.
col instance_name new_value instance_name noprint;
select user ||'@' || instance_name "instance_name" from v$instance;
set sqlprompt "&instance_name>";
set timing on;
set time on;
set lines 198;
set pages 1000;
set colsep |;
set long 99999999;
alter session set nls_date_format='DD-MM-YYYY HH24:MI:SS';
clear scr
******************************
/home/satvvikv/status.sql
******************************
set lines 198 pages 1000 timing on time on
col HOST_NAME for a40
-- Instance/database status across the cluster: role, open mode, startup and
-- current time, one row per instance (gv$database joined to gv$instance on
-- inst_id = instance_number).
select INSTANCE_NUMBER,INSTANCE_NAME,host_name,name db_name, database_role, open_mode,to_char(startup_time,'DD-MM-YYYY HH24:MI:SS') Start_Time,  to_char(sysdate,'DD-MM-YYYY HH24:MI:SS') System_Time from gv$database a, gv$instance b  where a.inst_id=b.instance_number order by 1;
******************************
/home/satvvikv/sync.sql
******************************
-- Data Guard sync check: per archive destination, the archive and apply gap
-- relative to the local destination.
-- NOTE(review): assumes DEST_ID=1 is the local/primary destination - confirm
-- against log_archive_dest_1 on this database.
select  ads.DESTINATION , ads.STATUS, ads.TYPE, ads.ARCHIVED_SEQ#, ads.APPLIED_SEQ#,
a.ARCHIVED_SEQ#-ads.ARCHIVED_SEQ# archive_gap, decode(ads.APPLIED_SEQ#,0,0,a.ARCHIVED_SEQ#-ads.APPLIED_SEQ#)  applied_gap, ads.ERROR
from v$archive_dest_status ads, (select ARCHIVED_SEQ# from v$archive_dest_status where dest_id=1) a
where status <> 'INACTIVE';
******************************
/home/satvvikv/sync_dc.sql
******************************
select THREAD#, max(SEQUENCE#), ARCHIVED, APPLIED, DEST_ID from v$archived_log where DEST_ID = 1 group by THREAD#, ARCHIVED, APPLIED, DEST_ID order by 1;
******************************
/home/satvvikv/sync_dr.sql
******************************
select THREAD#, max(SEQUENCE#), ARCHIVED, APPLIED, DEST_ID from v$archived_log where DEST_ID = 2 and APPLIED = 'YES' group by THREAD#, ARCHIVED, APPLIED, DEST_ID order by 1;
******************************
/home/satvvikv/syninfo.sql
******************************
SELECT OWNER, TABLE_OWNER, TABLE_NAME FROM DBA_SYNONYMS WHERE SYNONYM_NAME = '&SYNONYM_NAME';
******************************
/home/satvvikv/tabinfo.sql
******************************
-- Basic table metadata (case-insensitive table-name lookup).
COL DEGREE FOR A10
SELECT owner,
       table_name,
       partitioned,
       compression,
       tablespace_name,
       num_rows,
       last_analyzed,
       degree
  FROM dba_tables
 WHERE table_name = UPPER('&TABLE_NAME');
******************************
/home/satvvikv/tabpart.sql
******************************
-- Partition sizes (GB) for one table, grouped per partition.
-- fixes: (1) the statement was terminated with ';' AND followed by '/',
-- which made SQL*Plus execute it twice - the redundant '/' is removed;
-- (2) UPPER() makes the name lookup case-insensitive, consistent with
-- tabpart_d.sql and tabinfo.sql.
-- NOTE(review): *8192 assumes an 8K block size - confirm against db_block_size.
SELECT TABLE_OWNER, TABLE_NAME, PARTITION_NAME, TABLESPACE_NAME, COMPRESSION, COMPRESS_FOR, SUM(BLOCKS*8192)/1024/1024/1024 "SIZE IN GB" FROM DBA_TAB_PARTITIONS WHERE TABLE_NAME = UPPER('&TABLE_NAME') GROUP BY TABLE_OWNER, TABLE_NAME, PARTITION_NAME, TABLESPACE_NAME, COMPRESSION, COMPRESS_FOR ORDER BY 3;
******************************
/home/satvvikv/tabpart_d.sql
******************************
-- Per-partition compression setting and size (GB) for one table
-- (case-insensitive table-name lookup).
select table_owner,
       table_name,
       partition_name,
       compression,
       compress_for,
       (blocks*8192)/1024/1024/1024 GB
  from dba_tab_partitions
 where table_name = upper('&tabname')
 order by partition_name
/
******************************
/home/satvvikv/tbs.sql
******************************
-- Per-datafile allocated/free/used space (MB) for one tablespace.  The outer
-- join keeps datafiles that have NO free extents at all (completely full),
-- which would otherwise vanish from the report.
set line 160
set pagesize 50
col file_name for a80
col TABLESPACE_NAME for a25
select df.FILE_NAME,df.TABLESPACE_NAME,round(df.BYTES/1024/1024) Allocated,
round(nvl(dfs.BYTES/1024/1024,0)) FREE_SPACE, round(((df.BYTES/1024/1024)-nvl((dfs.BYTES/1024/1024),0))) USED_SPACE
from dba_data_files df,
(select file_id,tablespace_name,sum(bytes) bytes
 from dba_free_space
 group by file_id,tablespace_name) dfs
where df.FILE_ID=dfs.FILE_ID (+)
AND df.TABLESPACE_NAME=upper('&TABLESPACE_NAME')
order by free_space
--substr(df.FILE_NAME,-6,2)
/
******************************
/home/satvvikv/temp_usage.sql
******************************
-- Per-session temporary-space usage (MB) across the cluster, with the
-- session's current SQL_ID and wait event.
-- fix: "col USERNAME a15" was missing the FOR keyword and is invalid
-- SQL*Plus syntax; the USERNAME column was never formatted.
-- NOTE(review): T.blocks appears in the GROUP BY, so one session's usage can
-- be split across several rows (one per distinct extent size) - confirm
-- whether per-session totals are wanted instead.
col tablespace for a25
col EVENT for a35
col USERNAME for a15
SELECT s.inst_id,s.sid,s.sql_id,t.TABLESPACE,s.event,s.username,SUM (T.blocks) * TBS.block_size / 1024 / 1024 mb_used, COUNT(*) statements FROM gv$sort_usage T, gv$session S, dba_tablespaces TBS WHERE T.session_addr = S.saddr AND T.tablespace = TBS.tablespace_name and T.inst_id=s.inst_id GROUP BY s.inst_id,s.sid,s.sql_id,s.event,s.username,T.blocks,TBS.block_size,t.tablespace order by 7
/
******************************
/home/satvvikv/tempfiles.sql
******************************
-- Temp files with their tablespace and size in GB.
COL FILE_NAME FOR A60
SELECT file_id,
       file_name,
       tablespace_name,
       bytes/1024/1024/1024 "SIZE IN GB"
  FROM dba_temp_files
 ORDER BY tablespace_name;
******************************
/home/satvvikv/tempfree.sql
******************************
-- Temp tablespace free-space report (GB): DBA_TEMP_FREE_SPACE totals joined
-- to per-extent-pool figures from V$TEMP_SPACE_HEADER.  "FREE SPACE" sums
-- never-allocated header space; "ACTUAL FREE SPACE" is the reusable figure
-- reported by DBA_TEMP_FREE_SPACE.
SELECT TFS.TABLESPACE_NAME "TABLESPACE NAME", ROUND(TFS.TABLESPACE_SIZE/1024/1024/1024) "TABLESPACE SIZE", ROUND(TFS.ALLOCATED_SPACE/1024/1024/1024) "ALLOCATED SPACE", ROUND(SUM(TSH.BYTES_FREE/1024/1024/1024)) "FREE SPACE", ROUND(TFS.FREE_SPACE/1024/1024/1024) "ACTUAL FREE SPACE" FROM DBA_TEMP_FREE_SPACE TFS INNER JOIN V$TEMP_SPACE_HEADER TSH
ON TFS.TABLESPACE_NAME = TSH.TABLESPACE_NAME GROUP BY TFS.TABLESPACE_NAME,TFS.TABLESPACE_SIZE,TFS.ALLOCATED_SPACE,TFS.FREE_SPACE ORDER BY 1;
******************************
/home/satvvikv/tempfree1.sql
******************************
select tablespace_name,round(sum(bytes_used/1024/1024/1024)) "Used_Space(MB)",round(sum(bytes_free/1024/1024/1024)) "Free_Space(GB)" from v$temp_space_header group by tablespace_name order by 1;******************************
/home/satvvikv/total_temp_usage.sql
******************************
select SUM (T.blocks) * TBS.block_size / 1024 / 1024 / 1024 "TEMP USAGE(GB)" FROM gv$sort_usage T, gv$session S, dba_tablespaces TBS WHERE T.session_addr = S.saddr AND T.tablespace = TBS.tablespace_name and T.inst_id=s.inst_id GROUP BY TBS.block_size;
******************************
/home/satvvikv/try.sql
******************************
col TABLESPACE_NAME format a30
col TOTAL_SIZE_GB format 999999999.99
col FREE_SIZE_GB format 9999999999.99
col USED_SIZE_GB format 999999999.99
-- Tablespace usage summary (GB and percent) from dba_data_files totals
-- versus dba_free_space.
-- NOTE(review): the inner join drops any tablespace with ZERO rows in
-- dba_free_space (i.e. 100% full) - exactly the ones most worth seeing.
-- Consider an outer join if that case matters.
select df.tablespace_name TABLESPACE_NAME, df.bytes/(1024*1024*1024) TOTAL_SIZE_GB, (df.bytes/(1024*1024*1024) -sum(fs.bytes)/(1024*1024*1024)) USED_SIZE_GB, sum(fs.bytes)/(1024*1024*1024) FREE_SIZE_GB, round(sum(fs.bytes)*100/df.bytes) FREE_PERCENT, round((df.bytes-sum(fs.bytes))*100/df.bytes) USED_PERCENT from dba_free_space fs, (select tablespace_name, sum(bytes) bytes from dba_data_files  group by tablespace_name ) df where fs.tablespace_name = df.tablespace_name group by df.tablespace_name, df.bytes order by USED_PERCENT;
******************************
/home/satvvikv/ts_frag.sql
******************************
select sum(dfs.bytes)/1024/1024/1024 BYTES_GB from dba_free_space dfs where dfs.tablespace_name = '&&TABLESPACE_NAME' and dfs.bytes < (select min(ds.next_extent) from dba_segments ds where ds.tablespace_name = '&&TABLESPACE_NAME') order by dfs.block_id;
******************************
/home/satvvikv/waitstat.sql
******************************
-- 'Database Wait Time' metrics per instance from gv$sysmetric.
select inst_id,
       metric_name,
       value
  from gv$sysmetric
 where metric_name like 'Database Wait Time%'
 order by inst_id
/
******************************
/home/satvvikv/ws.sql
******************************
-- Non-idle wait summary: per instance/event/wait-class, total seconds in
-- wait, total wait time and number of waiting sessions.
-- fix: the SUM columns now carry aliases; previously they were emitted as
-- "SUM(SECONDS_IN_WAIT)" / "SUM(WAIT_TIME)", so the COL wait_time format
-- command below never matched and had no effect.
col event form a45
col wait_class form a15
col no_of_sessions form 999999
col avg_wait_time form 999999
col wait_time form 999999
select inst_id,event, wait_class, sum(SECONDS_IN_WAIT) seconds_in_wait, sum(WAIT_TIME) wait_time, count(1) no_of_sessions from gv$session_wait where wait_class <> 'Idle'
group by inst_id,event, wait_class
order by 1;
******************************
/home/satvvikv/ws_old.sql
******************************
-- Spooling variant of ws.sql: write the non-idle wait summary to a
-- timestamped .lst file named after instance and host (for periodic
-- collection from cron).
-- fixes: (1) the final SELECT had no terminator, so it was left in the
-- buffer and never executed - added '/'; (2) the spool file was never
-- closed - added SPOOL OFF; (3) aliased the SUM columns so the COL format
-- commands match them.
column spoolfile new_value xspoolfile;
select 'wait_stats.'||instance_name||'-'||lower(HOST_NAME)||'.'||lower(to_char(sysdate,'DDMMYYYYHH24MI'))||'.lst' spoolfile from v$instance;
spool &xspoolfile;
col event form a45
col wait_class form a15
col no_of_sessions form 999999
col avg_wait_time form 999999
col wait_time form 999999
select inst_id,event, wait_class, sum(SECONDS_IN_WAIT) seconds_in_wait, sum(WAIT_TIME) wait_time, count(1) no_of_sessions from gv$session_wait where wait_class <> 'Idle'
group by inst_id,event, wait_class
order by 1
/
spool off
******************************
/home/satvvikv/xplan.sql
******************************
select * from table (dbms_xplan.display);

Monday, January 27, 2014

How to change the Grid infrastructure home path

Changing the Oracle Grid Infrastructure Home Path

With release 11.2.0.3 and later, after installing Oracle Grid Infrastructure for a cluster (Oracle Clusterware and Oracle ASM configured for a cluster), if you need to change the Grid home path, then use the following example as a guide to detach the existing Grid home, and to attach a new Grid home:
Caution:
Before changing the Grid home, you must shut down all executables that run in the Grid home directory that you are relinking. In addition, shut down applications linked with Oracle shared libraries.
  1. Detach the existing Grid home by running the following command as the Oracle Grid Infrastructure installation owner (grid), where /u01/app/11.2.0/grid is the existing Grid home location:
    $ cd /u01/app/11.2.0/grid/oui/bin
    $ ./detachhome.sh -silent -local -invPtrLoc /u01/app/11.2.0/grid/oraInst.loc
    
  2. As root, unlock and move the Grid binaries from the old Grid home location to the new Grid home location. For example, where the old Grid home is /u01/app/11.2.0/grid and the new Grid home is /u01/app/grid:
    cd /u01/app/11.2.0/grid/
    perl rootcrs.pl -unlock
    # mv /u01/app/11.2.0/grid /u01/app/grid
    
  3. Clone the Oracle Grid Infrastructure installation, using the instructions provided in "Creating a Cluster by Cloning Oracle Clusterware Step 3: Run the clone.pl Script on Each Destination Node," in Oracle Clusterware Administration and Deployment Guide.
    When you navigate to the Grid_home/clone/bin directory and run the clone.pl script, provide values for the input parameters that provide the path information for the new Grid home.
  4. As root again, enter the following commands to start up in the new home location:
    # cd Grid_home/rdbms/install/
    # ./rootadd_rdbms.sh
    # cd Grid_home/crs/install
    # perl rootcrs.pl -patch -destcrshome /u01/app/grid
    
  5. Repeat steps 1 through 4 on each cluster member node.
You must relink the Oracle Clusterware and Oracle ASM binaries every time you move the Grid home.

Friday, January 24, 2014

Log file sync wait resolution

Adaptive Log File Sync: Oracle, Please Don’t Do That Again



Disclaimer: Much of what follows is pure speculation on my part. It could be completely wrong, and I’m putting it out there in the hopes that it’ll eventually be proven one way or the other.

The Summary

  • Underscore parameter _use_adaptive_log_file_sync
    • Default value changed in 11.2.0.3 from FALSE to TRUE
    • Dynamic parameter
  • Enables a new method of communication for LGWR to notify foreground processes of commit
    • Old method used semaphores, LGWR had to explicitly “post” every waiting process
    • New method has the FG processes sleep and “poll” to see if commit is complete
    • Advantage is to free LGWR from CPU work required to inform lots of processes about commits
  • LGWR dynamically switches between old and new method based on load and responsiveness
    • Method can switch frequently at runtime, max frequency is 3 switches per minute (configurable)
    • Switch is logged in LGWR tracefile, we have seen several switches per day
  • Few problems in general, possible issues seem to be in RAC and/or the switching process itself

The Story

We’re working with a customer who had a very successful Exadata go-live last weekend. They moved a large application mostly unchanged from 9i to Exadata – and we’ve generally had very positive feedback. (!!) It might be that just getting on modern hardware and software accounts for this, but it still gives everyone a good feeling.
Nevertheless, there are always a few little adventures. One of these was a mysterious orange glob on the Cloud Control radar Monday at 1am. Orange is the “commit” class – so this was a little surprising! (And reminiscent of Karl Arao’s famous halloween monster!) A few drill-down clicks and we’re looking at “log file sync” events.  To make a long story short, it wasn’t any of the usual suspects, and the necessary diagnostics info wasn’t available after the fact to say what happened with certainty. We’ve done a fairly comprehensive incident report and it hasn’t recurred… So I’m happy for now.
Anyway, I haven’t gotten to the really fun part yet. The really fun part is the #everydaylearning as Yury would say. In the process of analyzing the orange glob, I ended up investigating whether it might be related to a new feature called Adaptive Log File Sync.  I’d never heard of this before so I had quite a bit of fun learning, and it seemed worthwhile to share a few things I found.
Before I dive in, one other important thing: I have to mention Christo Kutrovsky. We were both on-site and digging into this feature at the same time. There was a constant exchange of competing theories and different ideas about how things might work. Together we assembled a strong, in-depth, and working theory very, very quickly – I love working on a great team.

Log File Sync

First off, there are a few basic starting points for anyone troubleshooting time spent in the log file sync event.
In my specific case, I discovered quickly from the ASH data that this was a very unusual situation. This led to some more creative searching in the Oracle Support KnowledgeBase – at which point I discovered something very interesting:

Adaptive Log File Sync

Note 1462942.1 describes a feature whereby LGWR can switch between log write methods.  This feature is enabled through an underscore parameter called _use_adaptive_log_file_sync and the description of this parameter is: adaptively switch between post/wait and polling.
Now I’ll be honest: searching on the internet and searching in Oracle Support’s KnowledgeBase for “adaptive log file sync” yields almost nothing. I do believe in the principle of BAAG – but this is a case where some guesswork might be useful, especially to guide experimentation that could nail down more concrete answers. Hence the disclaimer at the top of this article.
The words “post” and “wait” indicate that we’re talking about semaphores.  For some general background on semaphores, check out the Wikipedia article and for more detail about the unix post() and wait() calls, a good resource is chapter 30 from Andrea Arpaci-Dusseau’s (UW Madison) textbook on Operating Systems.
Oracle uses semaphores extensively. In fact, LGWR and commits are specifically mentioned as an example in the Performance Tuning Guide – and Oracle even has an API to replace semaphore use with a third-party driver for lightweight post-wait implementations. (Like an ODM for your OS kernel.)
If you look for references to “commit” in the Oracle docs, you’ll find the word “post” everywhere when they talk about communication between the foreground processes and LGWR.  Now, remember that a COMMIT has two options: first, IMMEDIATE or BATCH and second, WAIT or NOWAIT.  It looks like this to me:
  • Immediate: FG process will post to LGWR, triggering I/O  (default)
  • Batch: FG process will not post LGWR
  • Wait: LGWR will post FG process when I/O is complete  (default)
  • NoWait: LGWR will not post FG process when I/O is complete
Why change a good thing? My theory is that it ties back to a rare problem Riyaj mentioned in that blog post over 4 years ago.  I’ll quote him:
“LGWR is unable to post the processes fast enough, due to excessive commits. It is quite possible that there is no starvation for CPU or memory, and that I/O performance is decent enough. Still, if there are excessive commits, LGWR has to perform many writes/semctl calls, and this can increase ‘log file sync’ waits. This can also result in sharp increase in redo wastage statistics.”
Maybe Oracle is reading Riyaj’s blog? It appears that they came up with a new algorithm where LGWR doesn’t post. My guess: foreground processes can probably still post LGWR – but LGWR never posts back. Instead, foreground processes in WAIT mode “poll” either a memory structure or LGWR itself. It could be an in-house implementation by Oracle, it could still use semaphores, or it could use message queues somehow [seems like a long shot but the unix poll() call is found there].
There’s one interesting challenge that I can think of in implementing this. With the semaphore approach, all the active commits are *sleeping* (off the CPU) while LGWR flushes the log buffer.  There could be dozens or hundreds of foreground processes simultaneously committing on a very busy system.  If we switch to a polling method, how do we ensure that these hundreds of processes don’t start spinning and steal CPU from LGWR, making the whole system even worse than it was in the beginning?
The answer might lie in a quick search for “adaptive_log_file_sync” from the underscore parameters. There are five more hidden parameters with that string:
-- List the hidden (underscore) init.ora parameters related to adaptive log file
-- sync. Note: the x$ fixed tables are only visible when connected as SYS.
SELECT
    a.ksppinm  AS name,
    b.ksppstvl AS value,
    a.ksppdesc AS description
FROM sys.x$ksppi a
INNER JOIN sys.x$ksppcv b
    ON a.indx = b.indx
-- Escape the leading underscore so LIKE matches it literally rather than as
-- a single-character wildcard. The original "escape ''" is invalid: Oracle
-- raises ORA-01425 (escape character must be a character string of length 1).
WHERE a.ksppinm LIKE '\_%adaptive_log%' ESCAPE '\'
ORDER BY name;
NameDefaultDescription
_adaptive_log_file_sync_high_switch_freq_threshold3Threshold for frequent log file sync mode switches (per minute)
_adaptive_log_file_sync_poll_aggressiveness0Polling interval selection bias (conservative=0, aggressive=100)
_adaptive_log_file_sync_sched_delay_window60Window (in seconds) for measuring average scheduling delay
_adaptive_log_file_sync_use_polling_threshold200Ratio of redo synch time to expected poll time as a percentage
_adaptive_log_file_sync_use_postwait_threshold50Percentage of foreground load from when post/wait was last used
It appears that there are a number of knobs to turn with this new algorithm – and it looks like Oracle is somehow dynamically calculating a “polling interval”.  Furthermore, it seems to be taking “scheduling delay” into account.
It also seems that by default, we’re limited to switching modes every 20 seconds (3 per minute).  This switching seems to be controlled by “thresholds” – and the threshold for enabling the new polling mode seems to be based on time.  From the default percentage, it looks to me like Oracle won’t switch until it thinks the poll time will be less than half of the current post time.  For switching back it also seems to be another “half” percentage (50), though I’m not sure what “foreground load” might mean.
Now there’s one other underscore parameter which I noticed while looking for things related to log file sync:
NameDefaultDescription
_fg_sync_sleep_usecs0Log file sync via usleep
This parameter is interesting for one reason: The adaptive sync parameters were introduced in 11g, but this parameter was introduced in 10g. And if you think about the name, it actually sounds very similar to the “polling” strategy and not like a semaphore strategy at all! Foreground processes call usleep() during a log sync – if you’re calling wait() then you don’t need to sleep. But if you’re polling, you definitely need to sleep. Maybe Oracle has been working on this idea since 10g? I wouldn’t put it past them. :)
So how do you know if you’re using this feature?  The most obvious sign will be in the LGWR trace file.
There will be messages each time that a switch happens, looking something like this:
*** 2012-10-16 01:47:50.289
kcrfw_update_adaptive_sync_mode: poll->post current_sched_delay=0 switch_sched_delay=1 current_sync_count_delta=8 switch_sync_count_delta=59

*** 2012-10-16 01:47:50.289
Log file sync switching to post/wait
Current approximate redo synch write rate is 2 per sec

*** 2012-10-16 02:51:19.285
kcrfw_update_adaptive_sync_mode: post->poll long#=51 sync#=352 sync=4600 poll=1061 rw=500 rw+=500 ack=7 min_sleep=1061

*** 2012-10-16 02:51:19.285
Log file sync switching to polling
Current scheduling delay is 1 usec
Current approximate redo synch write rate is 117 per sec
I mentioned that the underscore parameters were introduced in 11g; however, my current understanding is that only in 11.2.0.3 did the default value of _use_adaptive_log_file_sync change to true!! Is it comforting that they waited so long, or is it scary that they’d make this change with no available documentation or troubleshooting information?  I guess it depends on your perspective. Here’s mine:

Explanation Please?

  • This is a change of critical, core code that impacted every single database installation and upgrade.
  • I think it is a strange departure from Oracle’s normal practices to change such important code with absolutely no announcement, documentation, or technical troubleshooting information even in their customer-only support database.
  • In my opinion this requires explanation; they can do (and have done) much better.
Regardless, it’s now been 12 months since 11.2.0.3 was released. At present, it’s only mentioned in a single support article. This offers at least some evidence that adaptive log file sync hasn’t caused widespread panic.  For the past year, many of you have been using this feature without even knowing – and there weren’t enough problems to merit even two Oracle Support notes.
I sent a few emails out to friends who also tend to work on large, loaded systems. I wasn’t the first person to stumble across this – James Morle mentioned it in a tweet a few months ago, and (not surprisingly) Riyaj has stumbled across the feature as well. It seems to me that the issues that have been encountered intersect with RAC and/or the switching process itself. But my informal survey was hardly scientific.
Overall, I think Oracle dodged the bullet this time. But they still made an important change without supporting documentation. To my friends inside the big red mothership: Please don’t do that again! We like it better the other way.
Have you ever heard of this new 11.2.0.3 feature? Have you heard of the underscore parameter? I’d love to hear your stories!

Migrate Oracle Database from Standard Edition (SE) to Enterprise Edition (EE) and vice versa

Moving From the Standard Edition to the Enterprise Edition and vice versa



If you are using a Standard Edition database (Release prior to 11gR1), then you can change it to an Enterprise Edition database.

Step 1
Standard Edition database software should be same as the Enterprise Edition database software.
Step 2
Shutdown the database
Step 3
Shut down your all oracle services including oracle database
Step 4
De-install the Standard Edition oracle software
Step 5
Install the Enterprise Edition server software using the Oracle Universal Installer.
Step 6
Select the same Oracle home that was used for the de-installed Standard Edition. During the installation, be sure to select the Enterprise Edition. When prompted, choose Software only from the Database Configuration screen.
Step 7
Start up your database.
Your database is now upgraded to the Enterprise Edition.

Tips:

1. You can only convert Standard Edition Database to the Enterprise Edition Database by using above method.

2. If you want to convert from an Enterprise Edition database to a Standard Edition database, you must use Export/Import operation. Without Export/Import you can not convert.


Inside Story:

1. The Enterprise Edition contains data dictionary objects which are not available in the Standard Edition. If you just install the Standard Edition software, then you will end up with data dictionary objects which are useless. Some of them might be invalid and possibly create problems when maintaining the database.

2. The Export/Import operation does not introduce data dictionary objects specific to the Enterprise Edition, because the SYS schema objects are not exported. Oracle recommends using the Standard Edition EXP utility to export the data.

3. After the Import in the Standard Edition database, you are only required to drop all user schemas related to Enterprise Edition features, such as the MDSYS account used with Oracle Spatial.

Thursday, January 23, 2014

Archive gap resolution

Archive Gap Resolution using RMAN:
---------------------------------------------------------


1. On the standby database, stop the managed recovery process (MRP):


SQL> SELECT NAME,OPEN_MODE FROM V$DATABASE;

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

2. On the standby database, find the SCN which will be used for the incremental
backup at the primary database:

SQL> COL CURRENT_SCN FOR 9999999999999999
SQL> SELECT CURRENT_SCN FROM V$DATABASE;


SQL> COL CHECKPOINT_CHANGE# FOR 9999999999999999
SQL> SELECT DISTINCT CHECKPOINT_CHANGE# FROM V$DATAFILE_HEADER;

NOTE: CURRENT_SCN AND CHECKPOINT_CHANGE# SHOULD BE THE SAME; IF NOT, KINDLY PROVIDE THE CHECKPOINT_CHANGE# IN STEP NO:3




ON PRIMARY DATABASE:
====================
STEP 3
======
In RMAN, connect to the primary database and create an incremental backup
from the SCN derived in the previous step:

CONNECT TO RMAN ON PRIMARY DATABASE:
===================================

RMAN> run
{
allocate channel c1 device type disk maxpiecesize = 5G;
allocate channel c2 device type disk maxpiecesize = 5G;
allocate channel c3 device type disk maxpiecesize = 5G;
BACKUP INCREMENTAL FROM SCN <CHECKPOINT_CHANGE#> DATABASE FORMAT 'LOCATION\FILE_NAME_%t_%s_%p.bkp' tag 'FORSTANDBY';
}


TO CHECK THE BACKEDUP FILES:
============================
RMAN>LIST BACKUP;


COPY THE BACKUP SETS CREATED ON PRIMARY TO STANDBY USING OS COMMAND (NOTE:LOCATION MUST BE SAME FOR BOTH PRIMARY AND STANDBY)

FOR EX:IN PRIMARY IF THE LOCATION IS M:\RMANBKP\ THE SAME SHOULD BE CREATED ON STANDBY THEN COPY THE BACKEDUP FILES FROM PRIMARY TO STANDBY(M:\RMANBKP)

ON STANDBY DATABASE:
=====================

CONNECT TO RMAN:


RMAN>CATALOG START WITH 'LOCATION OF THE BACKEDUP FILES';

(FOR EX:CATALOG START WITH 'M:\RMANBKP')

RMAN>RECOVER DATABASE NOREDO;

(AFTER SUCCESSFUL COMPLETION PROCEED TO NEXT STEP IF ANY ERRORS KINDLY CHECK FOR THE SAME)

ON PRIMARY DATABASE:
====================

RMAN>run
{
allocate channel c1 device type disk;
BACKUP CURRENT CONTROLFILE FOR STANDBY FORMAT 'LOCATION\ForStandbyCTRL.bck' tag 'FORSTANDBY';
}

COPY THE CONTROLFILE BACKUP TO STANDBY USING OS COMMAND.


ON STANDBY DATABASE:
====================


RMAN>SHUTDOWN IMMEDIATE;
RMAN>STARTUP NOMOUNT;
RMAN>RESTORE STANDBY CONTROLFILE FROM 'LOCATION\ForStandbyCTRL.bck';
RMAN>SHUTDOWN IMMEDIATE;
RMAN>STARTUP MOUNT;


ON STANDBY DATABASE:
====================
PUT THE STANDBY DATABASE IN RECOVERY MODE:
===========================================
SQL>ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;




Tuning SQL queries

For the Oracle databases version 10.x.x.x or later


TO CREATE TUNING TASK:


-- Create a SQL Tuning Advisor task for a given SQL_ID.
-- Replace the 'sql_id' and 'task_name' placeholders with real values first.
DECLARE
  v_task_name  VARCHAR2(100);
BEGIN
  -- Comprehensive analysis scope, capped at 180 seconds of advisor time.
  v_task_name := DBMS_SQLTUNE.create_tuning_task (
                   sql_id      => 'sql_id',
                   scope       => DBMS_SQLTUNE.scope_comprehensive,
                   time_limit  => 180,
                   task_name   => 'task_name',
                   description => 'Tuning task for statement sql_id .');
  -- Echo the task name so it can be reused in the EXECUTE/REPORT steps below.
  DBMS_OUTPUT.put_line('l_sql_tune_task_id: ' || v_task_name);
END;
/

Note:

sql_id       --->sql_id of the query to be tuned which you can get it from AWR report or ADDM report
Task_name---> Name of the task (can be any name as per your choice)


TO CHECK STATUS :


SELECT task_name, STATUS FROM dba_advisor_log WHERE task_name ='task1';


TO EXECUTE TASK  

EXEC DBMS_SQLTUNE.execute_tuning_task(task_name => 'task1');


TO CHECK STATUS COMPLETED :

SELECT task_name, STATUS FROM dba_advisor_log WHERE task_name ='task1';


TO GET OUTPUT:

SET LINES 150
SET pages 50000
SET long 5000000
SET longc 5000000
SELECT DBMS_SQLTUNE.report_tuning_task('task1') AS recommendations FROM dual;


After Checking the recommendations from the above query don't forget to drop the task using below procedure

TO DROP THE TASK:

-- Remove the tuning task once its recommendations have been reviewed.
BEGIN
  DBMS_SQLTUNE.drop_tuning_task('task1');
END;
/





Oracle 12C Features




Oracle 12c New Features:


Part I covers:
  1. Online migration of an active data file
  2. Online table partition or sub-partition migration
  3. Invisible column
  4. Multiple indexes on the same column
  5. DDL logging
  6. Temporary undo in- and- outs
  7. New backup user privilege
  8. How to execute SQL statement in RMAN
  9. Table level recovery in RMAN
  10. Restricting PGA size

1. Online rename and relocation of an active data file

Unlike in the previous releases, a data file migration or renaming in Oracle database 12c R1 no longer requires a number of steps i.e. putting the tablespace in READ ONLY mode, followed by data file offline action. In 12c R1, a data file can be renamed or moved online simply using the ALTER DATABASE MOVE DATAFILE SQL statement. While the data file is being transferred, the end user can perform queries, DML and DDL tasks. Additionally, data files can be migrated between storages e.g. from non-ASM to ASM and vice versa.
Rename a data file:
SQL> ALTER DATABASE MOVE DATAFILE '/u00/data/users01.dbf' TO '/u00/data/users_01.dbf';
Migrate a data file from non-ASM to ASM:
SQL> ALTER DATABASE MOVE DATAFILE '/u00/data/users_01.dbf' TO '+DG_DATA';
Migrate a data file from one ASM disk group to another:
SQL> ALTER DATABASE MOVE DATAFILE '+DG_DATA/DBNAME/DATAFILE/users_01.dbf ' TO '+DG_DATA_02';
Overwrite the data file with the same name, if it exists at the new location:
SQL> ALTER DATABASE MOVE DATAFILE '/u00/data/users_01.dbf' TO '/u00/data_new/users_01.dbf' REUSE;
Copy the file to a new location whilst retaining the old copy in the old location:
SQL> ALTER DATABASE MOVE DATAFILE '/u00/data/users_01.dbf' TO '/u00/data_new/users_01.dbf' KEEP;
You can monitor the progress while a data file being moved by querying the v$session_longops dynamic view. Additionally, you can also refer the alert.log of the database where Oracle writes the details about action being taken place.

2. Online migration of table partition or sub-partition

Migration of a table partition or sub-partition to a different tablespace no longer requires a complex procedure in Oracle 12c R1. In a similar way to how a heap (non-partition) table online migration was achieved in the previous releases, a table partition or sub-partition can be moved to a different tablespace online or offline. When an ONLINE clause is specified, all DML operations can be performed without any interruption on the partition|sub-partition which is involved in the procedure. In contrast, no DML operations are allowed if the partition|sub-partition is moved offline.
Here are some working examples:
SQL> ALTER TABLE table_name  MOVE PARTITION|SUBPARTITION partition_name  TO tablespace tablespace_name;
SQL> ALTER TABLE table_name  MOVE PARTITION|SUBPARTITION partition_name  TO tablespace tablespace_name UPDATE INDEXES ONLINE;
The first example is used to move a table partition|sub-partition to a new tablespace offline. The second example moves a table partition/sub-partitioning online maintaining any local/global indexes on the table. Additionally, no DML operation will get interrupted when ONLINE clause is mentioned.
Important notes:
  • The UPDATE INDEXES clause will avoid any local/global indexes going unusable on the table.
  • Table online migration restriction applies here too.
  • There will be a locking mechanism involved to complete the procedure; also, it might lead to performance degradation and can generate huge redo, depending upon the size of the partition or sub-partition.

3. Invisible columns

In Oracle 11g R1, Oracle introduced a couple of good enhancements in the form of invisible indexes and virtual columns. Taking the legacy forward, invisible column concepts has been introduced in Oracle 12c R1. I still remember, in the previous releases, to hide important data –columns from being displayed in the generic queries– we used to create a view hiding the required information or apply some sort of security conditions.
In 12c R1, you can now have an invisible column in a table. When a column is defined as invisible, the column won’t appear in generic queries, unless the column is explicitly referred to in the SQL statement or condition, or DESCRIBED in the table definition. It is pretty easy to add or modify a column to be invisible and vice versa:
SQL> CREATE TABLE emp (eno number(6), ename varchar2(40), sal number(9) INVISIBLE);
SQL> ALTER TABLE emp MODIFY (sal visible);
You must explicitly refer to the invisible column name with the INSERT statement to insert the database into invisible columns. A virtual column or partition column can be defined as invisible too. However, temporary tables, external tables and cluster tables won’t support invisible columns.

4. Multiple indexes on the same column

Pre Oracle 12c, you can’t create multiple indexes either on the same column or set of columns in any form. For example, if you have an index on column {a} or columns {a,b}, you can’t create another index on the same column or set of columns in the same order. In 12c, you can have multiple indexes on the same column or set of columns as long as the index type is different. However, only one type of index is usable/visible at a given time. In order to test the invisible indexes, you need to set the optimizer_use_invisible_indexes=true.
Here’s an example:
SQL> CREATE INDEX emp_ind1 ON EMP(ENO,ENAME);
SQL> CREATE BITMAP INDEX emp_ind2 ON EMP(ENO,ENAME) INVISIBLE;

5. DDL logging

There was no direct option available to log the DDL action in the previous releases. In 12cR1, you can now log the DDL action into xml and log files. This will be very useful to know when the drop or create command was executed and by whom. The ENABLE_DDL_LOGGING initialization parameter must be configured in order to turn on this feature. The parameter can be set at the database or session levels. When this parameter is enabled, all DDL commands are logged in an xml and a log file under the $ORACLE_BASE/diag/rdbms/DBNAME/log|ddl location. An xml file contains information, such as DDL command, IP address, timestamp etc. This helps to identify when a user or table was dropped or when a DDL statement is triggered.
To enable DDL logging
SQL> ALTER SYSTEM|SESSION SET ENABLE_DDL_LOGGING=TRUE;
The following DDL statements are likely to be recorded in the xml/log file:
  • CREATE|ALTER|DROP|TRUNCATE TABLE
  • DROP USER
  • CREATE|ALTER|DROP PACKAGE|FUNCTION|VIEW|SYNONYM|SEQUENCE

6. Temporary Undo

Each Oracle database contains a set of system related tablespaces, such as, SYSTEM, SYSAUX, UNDO & TEMP, and each are used for different purposes within the Oracle database. Pre Oracle 12c R1, undo records generated by the temporary tables used to be stored in undo tablespace, much similar to a general/persistent table undo records. However, with the temporary undo feature in 12c R1, the temporary undo records can now be stored in the temporary tablespace instead of stored in undo tablespace. The prime benefits of temporary undo include: reduction in undo tablespace usage and less redo data generation as the information won’t be logged in redo logs. You have the flexibility to enable the temporary undo option either at session level or database level.

Enabling temporary undo

To be able to use the new feature, the following needs to be set:
  • Compatibility parameter must be set to 12.0.0 or higher
  • Enable TEMP_UNDO_ENABLED initialization parameter
  • Since the temporary undo records now stored in a temp tablespace, you need to create the temporary tablespace with sufficient space
  • For session level, you can use: ALTER SESSION SET TEMP_UNDO_ENABLED=TRUE;

Query temporary undo information

The dictionary views listed below are used to view/query the information/statistics about the temporary undo data:
  • V$TEMPUNDOSTAT
  • DBA_HIST_UNDOSTAT
  • V$UNDOSTAT
To disable the feature, you simply need to set the following:
SQL> ALTER SYSTEM|SESSION SET TEMP_UNDO_ENABLED=FALSE;

7. Backup specific user privilege

In 11g R2, SYSASM privilege was introduced to perform ASM specific operations. Similarly, backup and recovery tasks specific privilege SYSBACKUP has been introduced in 12c to execute backup and recovery commands in Recovery Manager (RMAN). Therefore, you can create a local user in the database and grant the SYSBACKUP privilege to perform any backup and recovery related tasks in RMAN without being granting the SYSDBA privilege.
$ ./rman target "username/password as SYSBACKUP"

8. How to execute SQL statement in RMAN

In 12c, you can now execute any SQL and PL/SQL commands in RMAN without the need of a SQL prefix: you can execute any SQL and PL/SQL commands directly from RMAN. How you can execute SQL statements in RMAN:
RMAN> SELECT username,machine FROM v$session;
 RMAN> ALTER TABLESPACE users ADD DATAFILE SIZE 121m;

9. Table or partition recovery in RMAN

Oracle database backups are mainly categorized into two types: logical and physical. Each backup type has its own pros and cons. In previous editions, it was not feasible to restore a table or partition using existing physical backups. In order to restore a particular object, you must have logical backup. With 12c R1, you can recover a particular table or partition to a point-in-time or SCN from RMAN backups in the event of a table drop or truncate.
When a table or partition recovery is initiated via RMAN, the following action is performed:
  • Required backup sets are identified to recover the table/partition
  • An auxiliary database will be configured to a point-in-time temporarily in the process of recovering the table/partition
  • Required table/partitions will be then exported to a dumpfile using the data pumps
  • Optionally, you can import the table/partitions in the source database
  • Rename option while recovery
An example of a table point-in-time recovery via RMAN (ensure you already have a full database backup from earlier):
 RMAN> connect target "username/password as SYSBACKUP";
 RMAN> RECOVER TABLE username.tablename UNTIL TIME 'TIMESTAMP…'
  AUXILIARY DESTINATION '/u01/tablerecovery'
  DATAPUMP DESTINATION '/u01/dpump'
  DUMP FILE 'tablename.dmp'
  NOTABLEIMPORT    -- this option avoids importing the table automatically.
REMAP TABLE 'username.tablename': 'username.new_table_name';    -- can rename table with this option.
Important notes:
  • Ensure sufficient free space available under /u01 filesystem for auxiliary database and also to keep the data pump file
  • A full database backup must be exists, or at least the SYSTEM related tablespaces
The following limitations/restrictions are applied on table/partition recovery in RMAN:
  • SYS user table/partition can’t be recovered
  • Tables/partitions stored under SYSAUX and SYSTEM tablespaces can’t be recovered
  • Recovery of a table is not possible when REMAP option used to recovery a table that contains NOT NULL constraints

10. Restricting PGA size

Pre Oracle 12c R1, there was no option to limit and control the PGA size. Although, you set a certain size toPGA_AGGREGATE_TARGET initialization parameter, Oracle could increase/reduce the size of the PGA dynamically based on the workload and requirements. In 12c, you can set a hard limit on PGA by enabling the automatic PGA management, which requires PGA_AGGREGATE_LIMIT parameter settings. Therefore, you can now set the hard limit on PGA by setting the new parameter to avoid excessive PGA usage.
SQL> ALTER SYSTEM SET PGA_AGGREGATE_LIMIT=2G;
SQL> ALTER SYSTEM SET PGA_AGGREGATE_LIMIT=0; --disables the hard limit
Important notes:

When the current PGA limit is exceeded, Oracle will automatically terminate/abort the session/process that holds the most untenable PGA memory.



Part 2 covers:
  1. Table partition maintenance enhancements
  2. Database upgrade improvements
  3. Restore/Recover data file over the network
  4. Data Pump enhancements
  5. Real-time ADDM
  6. Concurrent statistics gathering

1. Table partition maintenance enhancements

In Part I, I explained how to move a table partition or sub-partition to a different tablespace either offline or online. In this section, you will learn other enhancements relating to table partitioning.

Adding multiple new partitions

Before Oracle 12c R1, it was only possible to add one new partition at a time to an existing partitioned table. To add more than one new partition, you had to execute an individual ALTER TABLE ADD PARTITION statement to every new partition. Oracle 12c provides the flexibility to add multiple new partitions using a single ALTER TABLE ADD PARTITION command. The following example explains how to add multiple new partitions to an existing partitioned table:
SQL> CREATE TABLE emp_part
    (eno number(8), ename varchar2(40), sal number (6))
  PARTITION BY RANGE (sal)
  (PARTITION p1 VALUES LESS THAN (10000),
   PARTITION p2 VALUES LESS THAN (20000),
   PARTITION p3 VALUES LESS THAN (30000)
  );
Now lets add a couple of new partitions:
SQL> ALTER TABLE emp_part ADD PARTITION
  PARTITION p4 VALUES LESS THAN (35000),
  PARTITION p5 VALUES LESS THAN (40000);
In the same way, you can add multiple new partitions to a list and system partitioned table, provided that the MAXVALUE partition doesn’t exist.

How to drop and truncate multiple partitions/sub-partitions

As part of data maintenance, you typically either use drop or truncate partition maintenance task on a partitioned table.  Pre 12c R1, it was only possible to drop or truncate one partition at a time on an existing partitioned table.  With Oracle 12c, multiple partitions or sub-partitions can be dropped or merged using a single ALTER TABLE table_name {DROP|TRUNCATE} PARTITIONS command.
The following example explains how to drop or truncate multiple partitions on an existing partitioned table:
SQL> ALTER TABLE emp_part DROP PARTITIONS p4,p5;
SQL> ALTER TABLE emp_part TRUNCATE PARTITIONS p4,p5;
To keep indexes up-to-date, use the UPDATE INDEXES or UPDATE GLOBAL INDEXES clause, shown below:
SQL> ALTER TABLE emp_part DROP PARTITIONS p4,p5 UPDATE GLOBAL INDEXES;
SQL> ALTER TABLE emp_part TRUNCATE PARTITIONS p4,p5 UPDATE GLOBAL INDEXES;
If you truncate or drop a partition without the UPDATE GLOBAL INDEXES clause, you can query the column ORPHANED_ENTRIES in the USER_INDEXES or USER_IND_PARTITIONS dictionary views to find out whether the index contains any stale entries.

Splitting a single partition into multiple new partitions

The new enhanced SPLIT PARTITION clause in 12c will let you split a particular partition or sub-partition into multiple new partitions using a single command. The following example explains how to split a partition into multiple new partitions:
SQL> CREATE TABLE emp_part
    (eno number(8), ename varchar2(40), sal number (6))
  PARTITION BY RANGE (sal)
  (PARTITION p1 VALUES LESS THAN (10000),
   PARTITION p2 VALUES LESS THAN (20000),
   PARTITION p_max VALUES LESS THAN (MAXVALUE)
  );

SQL> ALTER TABLE emp_part SPLIT PARTITION p_max INTO
  (PARTITION p3 VALUES LESS THAN (25000),
   PARTITION p4 VALUES LESS THAN (30000), PARTITION p_max);

Merge multiple partitions into one partition

You can merge multiple partitions to a single partition using a single ALTER TABLE MERGE PARTITIONS statement:
SQL> CREATE TABLE emp_part
  (eno number(8), ename varchar2(40), sal number (6))
  PARTITION BY RANGE (sal)
  (PARTITION p1 VALUES LESS THAN (10000),
   PARTITION p2 VALUES LESS THAN (20000),
   PARTITION p3 VALUES LESS THAN (30000),
   PARTITION p4 VALUES LESS THAN (40000),
   PARTITION p5 VALUES LESS THAN (50000),
   PARTITION p_max VALUES LESS THAN (MAXVALUE)
  );

SQL> ALTER TABLE emp_part MERGE PARTITIONS p3,p4,p5 INTO PARTITION p_merge;
If the range falls in the sequence, you can use the following example:
SQL> ALTER TABLE emp_part MERGE PARTITIONS p3 TO p5 INTO PARTITION p_merge;

2. Database upgrade improvements

Whenever a new Oracle version is announced, the immediate challenge that every DBA confronts is the upgrade process. In this section, I will explain the two new improvements introduced for upgrading to 12c.

Pre-upgrade script

A new and much improved pre-upgrade information script, preupgrd.sql, replaces the legacy utlu[121]s.sql script in 12c R1. Apart from the preupgrade checks verification, the script is capable of addressing the various issues – in the form of fixup scripts – that are raised during the pre-post upgrade process.
The fixup scripts that are generated can be executed to resolve the problems at different levels, for example, pre-upgrade and post upgrade. When upgrading the database manually, the script must be executed manually before initiating the actual upgrade procedure. However, when the Database Upgrade Assistant (DBUA) tool is used to perform a database upgrade, it automatically executes the pre-upgrade scripts as part of the upgrade procedure and will prompt you to execute the fixup scripts in case of any errors that are reported.
The following example demonstrates how to execute the scripts:
SQL> @$ORACLE_12GHOME/rdbms/admin/preupgrd.sql
The above script generates a log file and a [pre/post]upgrade_fixup.sql script.  All these files are located under the $ORACLE_BASE/cfgtoollogs directory. Before you continue with the real upgrade procedure, you should run through the recommendations mentioned in the log file and execute the scripts to fix any issues.
Note: Ensure you copy the preupgrd.sql and utluppkg.sql scripts from the 12c Oracle home/rdbms/admin directory to the current Oracle database/rdbms/admin location.

Parallel-upgrade utility

The database upgrade duration is directly proportional to the number of components that are configured on the database, rather than the database size. In previous releases, there was no direct option or workaround available to run the upgrade process in parallel to quickly complete the overall upgrade procedure.
The catctl.pl (parallel-upgrade utility) that replaces the legacy catupgrd.sql script in 12c R1 comes with an option to run the upgrade procedure in parallel mode to improve the overall duration required to complete the procedure.
The following procedure explains how to initiate the parallel (with 3 processes) upgrade utility; you need to run this after you STARTUP the database in UPGRADE mode:
cd $ORACLE_12_HOME/perl/bin
$ ./perl catctl.pl -n 3 catupgrd.sql
The above two steps need to be run explicitly when a database is upgraded manually. However, the DBUA inherits the both new changes.

3. Restore/Recover data files over the network

Yet another great enhancement in 12c R1. You can now restore or recover a data file, control file, spfile, tablespace or entire database between primary and standby databases using a SERVICE name. This is particularly useful to synchronize the primary and standby databases.
When there is a pretty long gap found between the primary and standby database, you no longer require the complex roll-forward procedure to fill the gap between the primary and standby. RMAN is able to perform standby recovery getting the incremental backups through the network and applying them to the physical standby database. Having said that, you can directly copy the required data files from the standby location to the primary site using the SERVICE name e.g. in the case of a data file, tablespace lost on the primary database, or without actually restoring the data files from a backup set.
The following procedure demonstrates how to perform a roll forward using the new features to synchronize the standby database with its primary database:
On the physical standby database:
./rman target "username/password@standby_db_tns as SYSBACKUP"
RMAN> RECOVER DATABASE FROM SERVICE primary_db_tns USING COMPRESSED BACKUPSET;
The above example uses the primary_db_tns connect string defined on the standby database, connects to the primary database, performs an incremental backup, transfers these incremental backups over standby destination, and then applies these files to the standby database to synchronize the standby. However, you need to ensure you have configured primary_db_tns to point to the primary database on the standby database side.
In the following example, I will demonstrate a scenario to restore a lost data file on the primary database by fetching the data file from the standby database:
On the primary database:
./rman target "username/password@primary_db_tns as SYSBACKUP"
RMAN> RESTORE DATAFILE '+DG_DISKGROUP/DBNAME/DATAFILE/filename' FROM SERVICE standby_db_tns;

4. Data Pump enhancements

This part of the section will focus on the important enhancements introduced in data pumps. There are quite a few useful additions, such as converting view into a table while exporting and turning off logging while import.

Turn off redo log generation

The new TRANSFORM option introduced in data pumps import provides the flexibility to turn off the redo generation for the objects during the course of import. When DISABLE_ARCHIVE_LOGGING values is specified with the TRANSFORM option, redo generation for the objects in the context will be turned off during the entire import duration. This feature provides a great relief when importing large tables, and reduces the excessive redo generation, which results in quicker imports. This attribute applies to tables and indexes.
This example demonstrates this feature:
$ ./impdp directory=dpump dumpfile=abcd.dmp logfile=abcd.log TRANSFORM=DISABLE_ARCHIVE_LOGGING:Y

Transport view as table

This is another improvement in the data pumps. With the new VIEWS_AS_TABLES option, you can unload the view data into a table. The following example describes how to unload views data into a table during export:
$ ./expdp directory=dpump dumpfile=abcd.dmp logfile=abcd.log views_as_tables=my_view:my_table

5. Real-time ADDM analysis

Analyzing past and current database health statuses through a set of automatic diagnostic tools such as AWR, ASH and ADDM is part of every DBA's life. Though each individual tool can be used at various levels to measure the database's overall health and performance, no tool can be used when the database is unresponsive or totally hung.
When you encounter an unresponsive database or hung state, and if you have configured Oracle Enterprise Manager 12c Cloud Control, you can diagnose serious performance issues. This would give you a good picture about what’s currently going on in the database, and might also provide a remedy to resolve the issue.
The following step-by-step procedure demonstrates how to analyze the situation on the Oracle EM 12c Cloud Control :
  • Select the Emergency Monitoring option from the Performance menu on the Access the Database Home page.This will show the top blocking sessions in the Hang Analysis table.
  • Select the Real-Time ADDM option from the Performance to perform Real-time ADDM analysis.
  • After collecting the performance data, click on the Findings tab to get the interactive summary of all the findings.

6. Gathering  statistics concurrently on multiple tables

In previous Oracle database editions, whenever you execute a DBMS_STATS procedure to gather table, index, schema or database level statistics, Oracle used to collect stats one table at a time. If the table is big enough, then increasing the parallelism was recommended. With 12c R1, you can now collect stats on multiple tables, partitions and sub partitions concurrently.  Before you start using it, you must set the following at the database level to enable the feature:
SQL> ALTER SYSTEM SET RESOURCE_MANAGER_PLAN='DEFAULT_MAIN';
SQL> ALTER SYSTEM SET JOB_QUEUE_PROCESSES=4;

SQL> EXEC DBMS_STATS.SET_GLOBAL_PREFS('CONCURRENT', 'ALL');

SQL> EXEC DBMS_STATS.GATHER_SCHEMA_STATS('SCOTT');


Part 3 covers:
  1. Additions/Enhancements in ASM
  2. Additions/Enhancements in Grid Infrastructure
  3. Additions/Enhancements in Real Application Cluster (database)

1. Additions/Enhancements in Automatic Storage Management (ASM)

Flex ASM

In a typical Grid Infrastructure installation, each node will have its own ASM instance running and act as the storage container for the databases running on the node. This setup carries a single point-of-failure threat. For instance, if the ASM instance on the node fails, all the databases and instances running on the node will be impacted. To avoid ASM instance single-point-of-failure, Oracle 12c provides the Flex ASM feature. Flex ASM is a different concept and architecture altogether. Only a smaller number of ASM instances need to run on a group of servers in the cluster. When an ASM instance fails on a node, Oracle Clusterware automatically starts a surviving (replacement) ASM instance on a different node to maintain availability. In addition, this setup also provides ASM instance load balancing capabilities for the instances running on the node. Another advantage of Flex ASM is that it can be configured on a separate node.
When you choose Flex Cluster option as part of the cluster installation, Flex ASM configuration will be automatically selected as it is required by the Flex Cluster. You can also have traditional cluster over Flex ASM. When you decide to use Flex ASM, you must ensure the required networks are available.  You can choose the Flex ASM storage option as part of Cluster installation, or use ASMCA to enable Flex ASM in a standard cluster environment.
The following command shows the current ASM mode:
$ ./asmcmd showclustermode
$ ./srvctl config asm
Or connect to the ASM instances and query the INSTANCE_TYPE parameter. If the output value is ASMPROX, then Flex ASM is configured.

Increased ASM storage limits

The ASM storage hard limits on maximum ASM disk groups and disk size have been drastically increased. In 12cR1, ASM supports 511 ASM disk groups against 63 ASM disk groups in 11gR2. Also, an ASM disk can now be 32PB in size against 20PB in 11gR2.

Tuning ASM rebalance operations

The new EXPLAIN WORK FOR statement in 12c measures the amount of work required for a given ASM rebalance operation and inputs the result in V$ASM_ESTIMATE dynamic view. Using the dynamic view, you can adjust the POWER LIMIT clause to improve the rebalancing operation work. For example, if you want to measure the amount of work required for adding a new ASM disk, before actually running the manual rebalance operation, you can use the following:
SQL> EXPLAIN WORK FOR ALTER DISKGROUP DG_DATA ADD DISK data_005;

SQL> SELECT est_work FROM V$ASM_ESTIMATE;

SQL> EXPLAIN WORK SET STATEMENT_ID='ADD_DISK' FOR ALTER DISKGROUP DG_DATA ADD DISK data_005;

SQL> SELECT est_work FROM V$ASM_ESTIMATE WHERE STATEMENT_ID = 'ADD_DISK';
You can adjust the POWER limit based on the output you get from the dynamic view to improve the rebalancing operations.

ASM Disk Scrubbing

The new ASM Disk Scrubbing operation on a ASM diskgroup with normal or high redundancy level, verifies the logical data corruption on all ASM disks of that ASM diskgroup, and repairs the logical corruption automatically, if detected, using the ASM mirror disks. The disk scrubbing can be performed at disk group, specified disk or on a file and the impact is very minimal. The following examples demonstrate the disk scrubbing scenario:
SQL> ALTER DISKGROUP dg_data SCRUB POWER LOW|HIGH|AUTO|MAX;
SQL> ALTER DISKGROUP dg_data SCRUB FILE '+DG_DATA/MYDB/DATAFILE/filename.xxxx.xxxx'
  REPAIR POWER AUTO;

Active Session History (ASH) for ASM

The V$ACTIVE_SESSION_HISTORY dynamic view now provides the active session sampling on ASM instances too. However, the use of the diagnostic pack is subject to the license.

2. Additions/Enhancements in Grid Infrastructure

Flex Clusters

Oracle 12c supports two types of cluster configuration at the time of Clusterware installation: Traditional Standard Cluster and Flex Cluster. In a traditional standard cluster, all nodes in a cluster are tightly integrated with each other, interact through a private network and can access the storage directly. On the other hand, the Flex Cluster introduced two types of nodes arranged in a Hub and Leaf nodes architecture. The nodes arranged in the Hub nodes category are similar to the traditional standard cluster, i.e. they are interconnected to each other through a private network and have direct storage read/write access. The Leaf nodes are different from the Hub nodes. They don't need to have direct access to the underlying storage; rather they access the storage/data through Hub nodes.
You can configure up to 64 Hub nodes, and Leaf nodes can be many. In an Oracle Flex Cluster, you can have Hub nodes without having Leaf nodes configured, but no Leaf nodes exist without Hub nodes. You can configure multiple Leaf nodes to a single Hub node.  In an Oracle Flex Cluster, only Hub nodes have direct access to the OCR/Voting disks.  When you plan large scale cluster environments, this would be a great feature to use. This sort of setup greatly reduces interconnect traffic and provides room to scale up the cluster beyond the traditional standard cluster.
There are two ways to deploy the Flex Cluster:
  1. While configuring a brand new cluster
  2. Upgrade a standard cluster mode to Flex Cluster
If you are configuring a brand new cluster, you need to choose the type of cluster configuration during step 3, select Configure a Flex Cluster option and you will have to categorize the Hub and Leaf nodes on Step 6. Against each node, select the Role, Hub or Leaf, and optionally Virtual Hostname too.
The following steps are required to convert a standard cluster mode to Flex Cluster mode:
1. Get the current status of the cluster using the following command:
$ ./crsctl get cluster mode status
2. Run the following command as the root user:
$ ./crsctl set cluster mode flex
$ ./crsctl stop crs
$ ./crsctl start crs -wait
3. Change the node role as per your design
$ ./crsctl get node role config
$ ./crsctl set node role hub|leaf
$ ./crsctl stop crs
$ ./crsctl start crs -wait
Note the following:
  • You can’t revert back from Flex to Standard cluster mode
  • Cluster node mode change requires cluster stack stop/start
  • Ensure GNS is configured with a fixed VIP

OCR backup in ASM disk group

With 12c, OCR can now be backed up in an ASM disk group. This simplifies the access to the OCR backup files across all nodes. In case of OCR restore, you don't need to worry about which node the latest OCR backup is on. One can simply identify the latest backup stored in ASM from any node and perform the restore easily.
The following example demonstrates how to set the ASM disk group as OCR backup location:
$ ./ocrconfig -backuploc +DG_OCR

IPv6 support

With Oracle 12c, Oracle now supports IPv4 and IPv6 network protocol configuration on the same network. You can now configure public network (Public/VIP) either on IPv4, IPv6 or combination protocol configuration. However, ensure you use the same set of IP protocol configuration across all nodes in a cluster.

3. Additions/Enhancements in RAC (database)

What-If command evaluation

Using the new What-if command evaluation (-eval) option with srvctl, one can now determine the impact of running the command. This new addition to the srvctl command will let you simulate the command without it actually being executed or making any changes to the current system. This is particularly useful in a situation when you want to make a change to an existing system and you're not sure of the outcome.  Therefore, the command will provide the effect of making the change. The -eval option can also be used with the crsctl command.
For example, if you want to know what will happen if you stop a particular database, you can use the following example:
$ ./srvctl stop database -d MYDB -eval
$ ./crsctl eval modify resource <resource_name> -attr "value"

Miscellaneous srvctl improvements

There are a few new additions to the srvctl command. The following demonstrates the new addition to stop/start database/instance resources on the cluster:
srvctl start database|instance -startoption NOMOUNT|MOUNT|OPEN
srvctl stop database|instance -stopoption NORMAL|TRANSACTIONAL|IMMEDIATE|ABORT
The next article will focus on top most developers’ features on 12c.
Part 4 covers:
  • How to truncate a master table while child tables contain data
  • Limiting ROWS for Top-N query results
  • Miscellaneous SQL*Plus enhancements
  • Session level sequences
  • WITH clause improvements
  • Extended data types

Truncate table CASCADE

In the previous releases, there wasn't a direct option provided to truncate a master table while it is referred to by child tables and child records exist. The TRUNCATE TABLE with CASCADE option in 12c truncates the records in the master table and automatically initiates a recursive truncate on the child tables too, subject to the foreign key reference being defined as ON DELETE CASCADE. There is no cap on the number of recursive levels as it will apply to all child, grandchild and great-grandchild tables, etc.
This enhancement gets rid of the prerequisite to truncate all child records before truncating a master table. The new CASCADE clause can also be applied on table partitions and sub-partitions, etc.
SQL> TRUNCATE TABLE <table_name> CASCADE;

SQL> TRUNCATE TABLE <table_name> PARTITION <partition_name> CASCADE;
An ORA-14705 error will be thrown if no ON DELETE CASCADE option is defined with the foreign keys of the child tables.

ROW limiting for Top-N result queries

Various indirect approaches/methods exist to fetch Top-N query results for top/bottom rows in the previous releases. In 12c, retrieving Top-N query results for top/bottom rows has been simplified and become straightforward with the new FETCH FIRST|NEXT|PERCENT clauses.
In order to retrieve top 10 salaries from EMP table, use the following new SQL statement:
SQL> SELECT eno,ename,sal FROM emp ORDER BY SAL DESC
    FETCH FIRST 10 ROWS ONLY;
The following example fetches all records similar to the Nth row. For example, if the 10th row has a salary value of 5000, and there are other employees whose salary matches the Nth value, they will also be fetched upon mentioning the WITH TIES clause.
SQL> SELECT eno,ename,sal FROM emp ORDER BY SAL DESC
    FETCH FIRST 10 ROWS ONLY WITH TIES;
The following example limits the fetch to 10 per cent from the top salaries in the EMP table:
SQL> SELECT eno,ename,sal FROM emp ORDER BY SAL DESC
    FETCH FIRST 10 PERCENT ROWS ONLY;
The following example offsets the first 5 rows and will display the next 5 rows from the table:
SQL> SELECT eno,ename,sal FROM emp ORDER BY SAL DESC
    OFFSET 5 ROWS FETCH NEXT 5 ROWS ONLY;
All these limits can be very well used within the PL/SQL block too.
BEGIN
  SELECT sal BULK COLLECT INTO sal_v FROM EMP
    FETCH FIRST 100 ROWS ONLY;
END;

Miscellaneous SQL*Plus enhancements

Implicit Results on SQL*Plus: SQL*Plus in 12c returns results from an implicit cursor of a PL/SQL block without actually binding it to a RefCursor. The new dbms_sql.return_result procedure returns and formats the results of the SELECT statement query specified within the PL/SQL block. The following code describes the usage:
SQL> CREATE PROCEDURE mp1
                        as
   res1 sys_refcursor;
BEGIN
  open res1 for SELECT eno,ename,sal FROM emp;
  dbms_sql.return_result(res1);
END;

SQL> execute mp1;
When the procedure is executed, it returns the formatted rows on the SQL*Plus prompt.
Display invisible columns: In Part 1 of this series, I explained and demonstrated the new invisible columns feature. When the columns are defined as invisible, they won't be displayed when you describe the table structure. However, you can display the information about the invisible columns by setting the following on the SQL*Plus prompt:
SQL> SET COLINVISIBLE ON|OFF
The above setting is only valid for the DESCRIBE command. It has no effect on the SELECT statement results for the invisible columns.

Session level sequences

A new SESSION level database sequence can be created now in 12c to support the session level sequence values. These types of sequences are most useful and suitable on global temporary tables that have session level existence.
Session level sequences produce a unique range of values that are limited within the session, not across the sessions. Once the session ends, the state of the session sequences also goes away. The following example explains creating a session level sequence:
SQL> CREATE SEQUENCE my_seq START WITH 1 INCREMENT BY 1 SESSION;

SQL> ALTER SEQUENCE my_seq GLOBAL|SESSION;
The CACHE, NOCACHE, ORDER, or NOORDER clauses are ignored for SESSION level sequences.

WITH clause improvements

In 12c, you can have faster-running PL/SQL functions/procedures in SQL that are defined and declared within the WITH clause of SQL statements. The following examples demonstrate how to define and declare a procedure or function within the WITH clause:
WITH
  PROCEDURE|FUNCTION test1 (…)
  BEGIN
    <logic>
  END;
SELECT <reference_your_function|procedure_here> FROM table_name;
/
Although you can’t use the WITH clause directly in the PL/SQL unit, it can be referred through a dynamic SQL within that PL/SQL unit.

Extended data types

In 12c, the data types VARCHAR2, NVARCHAR2, and RAW will support a size of up to 32,767 bytes in contrast to 4,000 and 2,000 in the earlier releases. The extended character size will reduce the need to use LOB data types, wherever possible. In order to enable the extended character size, you will have to set the MAX_STRING_SIZE initialization database parameter to EXTENDED.
The following procedure need to run to use the extended data types:
  1. Shutdown the database
  2. Restart the database in UPGRADE mode
  3. Modify the parameter: ALTER SYSTEM SET MAX_STRING_SIZE=EXTENDED;
  4. Execute utl32k.sql as sysdba : SQL> @?/rdbms/admin/utl32k.sql
  5. Shutdown the database
  6. Restart the database in READ WRITE mode
In contrast to LOB data types, the extended data type columns in ASSM tablespace management are stored as SecureFiles LOBs, and in non-ASSM tablespace management they are stored as BasicFiles LOBs.
Note: Once modified, you can’t change the settings back to STANDARD.