
Tuesday, December 22, 2020

Script to Track The RMAN Backup Status

Script to Track The RMAN Backup Status  



set lines 1000
set pages 1000
col cf for 9,999
col df for 9,999
col elapsed_seconds heading "ELAPSED|SECONDS"
col i0 for 9,999
col i1 for 9,999
col l for 9,999
col output_mbytes for 9,999,999 heading "OUTPUT MBYTES"
col session_recid for 999999 heading "SESSION|RECID"
col session_stamp for 99999999999 heading "SESSION|STAMP"
col status for a10 trunc
col time_taken_display for a10 heading "TIME|TAKEN"
col output_instance for 9999 heading "OUT|INST"
col "SESSION RECID" for a15
col "OUTPUT|MBYTES" for a10
col start_time for a20
col end_time for a20
col input_type for a10
col dow for a15
col "ELAPSED|SECONDS" for a10
col "TIME|TAKEN" for a10

select j.session_recid, j.session_stamp,
       to_char(j.start_time, 'yyyy-mm-dd hh24:mi:ss') start_time,
       to_char(j.end_time, 'yyyy-mm-dd hh24:mi:ss') end_time,
       (j.output_bytes/1024/1024) output_mbytes, j.status, j.input_type,
       decode(to_char(j.start_time, 'd'), 1, 'Sunday', 2, 'Monday', 3, 'Tuesday', 4, 'Wednesday',
              5, 'Thursday', 6, 'Friday', 7, 'Saturday') dow,
       j.elapsed_seconds, j.time_taken_display,
       x.cf, x.df, x.i0, x.i1, x.l,
       ro.inst_id output_instance
from V$RMAN_BACKUP_JOB_DETAILS j
left outer join (select d.session_recid, d.session_stamp,
                        sum(case when d.controlfile_included = 'YES' then d.pieces else 0 end) CF,
                        sum(case when d.controlfile_included = 'NO' and d.backup_type||d.incremental_level = 'D' then d.pieces else 0 end) DF,
                        sum(case when d.backup_type||d.incremental_level = 'D0' then d.pieces else 0 end) I0,
                        sum(case when d.backup_type||d.incremental_level = 'I1' then d.pieces else 0 end) I1,
                        sum(case when d.backup_type = 'L' then d.pieces else 0 end) L
                 from V$BACKUP_SET_DETAILS d
                 join V$BACKUP_SET s on s.set_stamp = d.set_stamp and s.set_count = d.set_count
                 where s.input_file_scan_only = 'NO'
                 group by d.session_recid, d.session_stamp) x
  on x.session_recid = j.session_recid and x.session_stamp = j.session_stamp
left outer join (select o.session_recid, o.session_stamp, min(inst_id) inst_id
                 from GV$RMAN_OUTPUT o
                 group by o.session_recid, o.session_stamp) ro
  on ro.session_recid = j.session_recid and ro.session_stamp = j.session_stamp
where j.start_time > trunc(sysdate)-&NUMBER_OF_DAYS
order by j.start_time;
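
The script prompts for the substitution variable &NUMBER_OF_DAYS. A minimal usage sketch, assuming the query above is saved in a file named rman_backup_status.sql (a hypothetical name):

SQL> define NUMBER_OF_DAYS=7
SQL> @rman_backup_status.sql

With the variable defined, the query reports every RMAN backup job that started within the last seven days without prompting.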

Saturday, November 14, 2020

Script To Schedule RMAN Backups In Crontab

Script To Schedule RMAN Backups In Crontab


vi rman_full_bkp.sh

# $Header: rman_full_bkp.sh
# #################################################
# Script to be used on the crontab to schedule an RMAN Full Backup
# ################################################

# ###############################################
# VARIABLES To be Modified by the user to match the Environment:
# ###############################################

# INSTANCE Name: [Replace ${ORACLE_SID} with your instance SID]
ORACLE_SID=MQPROD

# ORACLE_HOME Location: [Replace ${ORACLE_HOME} with the right ORACLE_HOME path]
ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1

# Backup Location: [Replace the path below with your backup location path]
BACKUPLOC=/u01/app/oracle/example_backups

# Backup LOG location:
RMANLOG=/u01/app/oracle/example_backups/rmanfull.log

# COMPRESSED BACKUP option:[Y|N] [Default ENABLED]
COMPRESSION=Y

# Perform Maintenance based on below Backup & Archivelog Retention: [Y|N] [Default DISABLED]
MAINTENANCEFLAG=N

# Backup Retention "In Days": [Backups older than this retention will be deleted]
BKP_RETENTION=7

# Archives Deletion "In Days": [Archivelogs older than this retention will be deleted]
ARCH_RETENTION=7

# ##################
# GENERIC VARIABLES: [Can be left without modification]
# ##################

# MAX BACKUP Piece Size: [Must be BIGGER than the size of the biggest datafile in the database]
MAX_BKP_PIECE_SIZE=1g

# Show the full DATE and TIME details in the backup log:
NLS_DATE_FORMAT='DD-Mon-YYYY HH24:MI:SS'

export ORACLE_SID
export ORACLE_HOME
export BACKUPLOC
export COMPRESSION
export BKP_RETENTION
export ARCH_RETENTION
export MAX_BKP_PIECE_SIZE
export RMANLOG
export NLS_DATE_FORMAT
export MAINTENANCEFLAG

# Check the selected COMPRESSION option:
case ${COMPRESSION} in
Y|y|YES|Yes|yes|ON|on)
COMPRESSED_BKP="AS COMPRESSED BACKUPSET"
export COMPRESSED_BKP
;;
*)
COMPRESSED_BKP=""
export COMPRESSED_BKP
;;
esac

# Check the selected MAINTENANCE option:
case ${MAINTENANCEFLAG} in
Y|y|YES|Yes|yes|ON|on)
HASH_MAINT=""
export HASH_MAINT
;;
*)
HASH_MAINT="#"
export HASH_MAINT
;;
esac


# Append the date to the backup log for each script execution:
echo "----------------------------" >> ${RMANLOG}
date >> ${RMANLOG}
echo "----------------------------" >> ${RMANLOG}

# ###################
# RMAN SCRIPT Section:
# ###################

${ORACLE_HOME}/bin/rman target / msglog=${RMANLOG} <<EOF
# Configuration Section:
# ---------------------
${HASH_MAINT}CONFIGURE BACKUP OPTIMIZATION ON;
${HASH_MAINT}CONFIGURE CONTROLFILE AUTOBACKUP ON;
${HASH_MAINT}CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '${BACKUPLOC}/%F';
${HASH_MAINT}CONFIGURE SNAPSHOT CONTROLFILE NAME TO '${ORACLE_HOME}/dbs/snapcf_${ORACLE_SID}.f';
## Avoid Deleting archivelogs NOT yet applied on the standby: [When FORCE is not used]
#CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL STANDBY;

# Maintenance Section:
# -------------------
## Crosscheck backups/copies to check for expired backups which are physically not available on the media:
${HASH_MAINT}crosscheck backup completed before 'sysdate-${BKP_RETENTION}' device type disk;
${HASH_MAINT}crosscheck copy completed before 'sysdate-${BKP_RETENTION}' device type disk;
## Report & Delete Obsolete backups which don't meet the RETENTION POLICY:
${HASH_MAINT}REPORT OBSOLETE RECOVERY WINDOW OF ${BKP_RETENTION} DAYS device type disk;
${HASH_MAINT}DELETE NOPROMPT OBSOLETE RECOVERY WINDOW OF ${BKP_RETENTION} DAYS device type disk;
## Delete All EXPIRED backups/copies which are not physically available:
${HASH_MAINT}DELETE NOPROMPT EXPIRED BACKUP COMPLETED BEFORE 'sysdate-${BKP_RETENTION}' device type disk;
${HASH_MAINT}DELETE NOPROMPT EXPIRED COPY COMPLETED BEFORE 'sysdate-${BKP_RETENTION}' device type disk;
## Crosscheck Archivelogs to avoid the backup failure:
${HASH_MAINT}CHANGE ARCHIVELOG ALL CROSSCHECK;
${HASH_MAINT}DELETE NOPROMPT EXPIRED ARCHIVELOG ALL;
## Delete Archivelogs older than ARCH_RETENTION days:
${HASH_MAINT}DELETE NOPROMPT archivelog all completed before 'sysdate -${ARCH_RETENTION}';

# Full Backup Script starts here: [Compressed+Controlfile+Archives]
# ------------------------------
run{
allocate channel F1 type disk;
allocate channel F2 type disk;
sql 'alter system archive log current';
BACKUP ${COMPRESSED_BKP}
#MAXSETSIZE ${MAX_BKP_PIECE_SIZE}
NOT BACKED UP SINCE TIME 'SYSDATE-2/24'
INCREMENTAL LEVEL=0
FORMAT '${BACKUPLOC}/%d_%t_%s_%p.bkp' 
FILESPERSET 100
TAG='FULLBKP'
DATABASE include current controlfile PLUS ARCHIVELOG NOT BACKED UP SINCE TIME 'SYSDATE-2/24';
## Backup the controlfile separately:
BACKUP ${COMPRESSED_BKP} CURRENT CONTROLFILE FORMAT '${BACKUPLOC}/CONTROLFILE_%d_%I_%t_%s_%p.bkp' TAG='CONTROLFILE_BKP' REUSE ;
## Trace backup of Controlfile & SPFILE:
SQL "ALTER DATABASE BACKUP CONTROLFILE TO TRACE AS ''${BACKUPLOC}/controlfile.trc'' REUSE";
SQL "CREATE PFILE=''${BACKUPLOC}/init${ORACLE_SID}.ora'' FROM SPFILE";
}
EOF


34 12 * * * "/home/oracle/rman_full_bkp.sh" > /tmp/rmanarch.log
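
A brief sketch of how this entry is installed, assuming the script was saved as /home/oracle/rman_full_bkp.sh:

# make the script executable once
chmod +x /home/oracle/rman_full_bkp.sh

# open the crontab editor and add the entry shown above
crontab -e

# crontab fields: minute hour day-of-month month day-of-week command,
# so the entry runs the backup daily at 12:34 and writes the crontab output to /tmp/rmanarch.log;
# the detailed RMAN log still goes to the RMANLOG file defined inside the script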






Different Ways To Check The Tablespace Size

Different Ways To Check The Tablespace Size

 

Script 1.


set colsep |
set linesize 100 pages 100 trimspool on numwidth 14
col name format a25
col owner format a15
col "Used (GB)" format a15
col "Free (GB)" format a15
col "(Used) %" format a15
col "Size (M)" format a15

SELECT d.status "Status",
       d.tablespace_name "Name",
       TO_CHAR(NVL(a.bytes / 1024 / 1024 /1024, 0),'99,999,990.90') "Size (GB)",
       TO_CHAR(NVL(a.bytes - NVL(f.bytes, 0), 0)/1024/1024 /1024,'99999999.99') "Used (GB)",
       TO_CHAR(NVL(f.bytes / 1024 / 1024 /1024, 0),'99,999,990.90') "Free (GB)",
       TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.bytes * 100, 0), '990.00') "(Used) %"
FROM   sys.dba_tablespaces d,
       (select tablespace_name, sum(bytes) bytes from dba_data_files group by tablespace_name) a,
       (select tablespace_name, sum(bytes) bytes from dba_free_space group by tablespace_name) f
WHERE  d.tablespace_name = a.tablespace_name(+)
AND    d.tablespace_name = f.tablespace_name(+)
AND    NOT (d.extent_management like 'LOCAL' AND d.contents like 'TEMPORARY')
UNION ALL
SELECT d.status "Status",
       d.tablespace_name "Name",
       TO_CHAR(NVL(a.bytes / 1024 / 1024 /1024, 0),'99,999,990.90') "Size (GB)",
       TO_CHAR(NVL(t.bytes,0)/1024/1024 /1024,'99999999.99') "Used (GB)",
       TO_CHAR(NVL((a.bytes -NVL(t.bytes, 0)) / 1024 / 1024 /1024, 0),'99,999,990.90') "Free (GB)",
       TO_CHAR(NVL(t.bytes / a.bytes * 100, 0), '990.00') "(Used) %"
FROM   sys.dba_tablespaces d,
       (select tablespace_name, sum(bytes) bytes from dba_temp_files group by tablespace_name) a,
       (select tablespace_name, sum(bytes_cached) bytes from v$temp_extent_pool group by tablespace_name) t
WHERE  d.tablespace_name = a.tablespace_name(+)
AND    d.tablespace_name = t.tablespace_name(+)
AND    d.extent_management like 'LOCAL'
AND    d.contents like 'TEMPORARY';

 

Script 2.

 

set lines 135
column TABLESPACE_NAME for a20
column FILE_NAME for a93
set lines 220 pages 220

SELECT df.tablespace_name,
       df.size_mb,
       f.free_mb,
       df.max_size_mb,
       f.free_mb + (df.max_size_mb - df.size_mb) AS max_free_mb,
       df.file_name
FROM   (SELECT file_id, file_name, tablespace_name,
               TRUNC(bytes/1024/1024) AS size_mb,
               TRUNC(GREATEST(bytes,maxbytes)/1024/1024) AS max_size_mb
        FROM   dba_data_files) df,
       (SELECT TRUNC(SUM(bytes)/1024/1024) AS free_mb, file_id
        FROM   dba_free_space
        GROUP BY file_id) f
WHERE  df.file_id = f.file_id (+)
ORDER BY df.tablespace_name, df.file_name;

                                 

Script 3.                                 

                                 

select fs.tablespace_name "Tablespace",
       (df.totalspace - fs.freespace) "Used MB",
       fs.freespace "Free MB",
       df.totalspace "Total MB",
       round(100 * (fs.freespace / df.totalspace)) "Pct. Free"
from   (select tablespace_name, round(sum(bytes) / 1048576) TotalSpace
        from dba_data_files
        group by tablespace_name) df,
       (select tablespace_name, round(sum(bytes) / 1048576) FreeSpace
        from dba_free_space
        group by tablespace_name) fs
where  df.tablespace_name = fs.tablespace_name
order by 1;

  

Script 4.  

 

set pages 80
set lin 120
set echo off
set feed off
column PCT format 999.99
column tbspce format A30
col container for a30

select substr(f.tablespace_name,1,30) tbspce,
       round(f.tsbytes/(1024*1024),0) "ALLOCATED(MB)",
       round(nvl(s.segbytes,0)/(1024*1024),0) "USED(MB)",
       round((nvl(s.segbytes,0)/f.tsbytes)*100,2) PCT,
       lower(vc.name) as container
from   (select con_id, tablespace_name, sum(bytes) tsbytes
        from cdb_data_files
        group by con_id, tablespace_name) f,
       (select con_id, tablespace_name, sum(bytes) segbytes
        from cdb_segments
        group by con_id, tablespace_name) s,
       v$containers vc
where  f.con_id=s.con_id(+)
and    f.tablespace_name=s.tablespace_name(+)
and    f.con_id=vc.con_id
order by container, tbspce;

 

Script 5.

 



set pages 999
set lines 400

SELECT df.tablespace_name tablespace_name,
       max(df.autoextensible) auto_ext,
       round(df.maxbytes / (1024 * 1024), 2) max_ts_size,
       round((df.bytes - sum(fs.bytes)) / (df.maxbytes) * 100, 2) max_ts_pct_used,
       round(df.bytes / (1024 * 1024), 2) curr_ts_size,
       round((df.bytes - sum(fs.bytes)) / (1024 * 1024), 2) used_ts_size,
       round((df.bytes-sum(fs.bytes)) * 100 / df.bytes, 2) ts_pct_used,
       round(sum(fs.bytes) / (1024 * 1024), 2) free_ts_size,
       nvl(round(sum(fs.bytes) * 100 / df.bytes), 2) ts_pct_free
FROM   dba_free_space fs,
       (select tablespace_name, sum(bytes) bytes,
               sum(decode(maxbytes, 0, bytes, maxbytes)) maxbytes,
               max(autoextensible) autoextensible
        from dba_data_files
        group by tablespace_name) df
WHERE  fs.tablespace_name (+) = df.tablespace_name
GROUP BY df.tablespace_name, df.bytes, df.maxbytes
UNION ALL
SELECT df.tablespace_name tablespace_name,
       max(df.autoextensible) auto_ext,
       round(df.maxbytes / (1024 * 1024), 2) max_ts_size,
       round((df.bytes - sum(fs.bytes)) / (df.maxbytes) * 100, 2) max_ts_pct_used,
       round(df.bytes / (1024 * 1024), 2) curr_ts_size,
       round((df.bytes - sum(fs.bytes)) / (1024 * 1024), 2) used_ts_size,
       round((df.bytes-sum(fs.bytes)) * 100 / df.bytes, 2) ts_pct_used,
       round(sum(fs.bytes) / (1024 * 1024), 2) free_ts_size,
       nvl(round(sum(fs.bytes) * 100 / df.bytes), 2) ts_pct_free
FROM   (select tablespace_name, bytes_used bytes
        from V$temp_space_header
        group by tablespace_name, bytes_free, bytes_used) fs,
       (select tablespace_name, sum(bytes) bytes,
               sum(decode(maxbytes, 0, bytes, maxbytes)) maxbytes,
               max(autoextensible) autoextensible
        from dba_temp_files
        group by tablespace_name) df
WHERE  fs.tablespace_name (+) = df.tablespace_name
GROUP BY df.tablespace_name, df.bytes, df.maxbytes
ORDER BY 4 DESC;

Oracle RAC Interview Questions

Oracle RAC Interview Questions


  1. How does an end user connect to the RAC database?
  2. What is a voting disk, and why do we keep an odd number of voting disks?
  3. What is split-brain syndrome in RAC? What happens if this condition persists for a long time?
  4. Explain the high-level steps to install a 3-node Oracle RAC database.
  5. Explain the patching types in Oracle RAC and the steps to apply a patch.
  6. Can we use different operating systems when configuring RAC?
  7. From which version is Clusterware fencing available?
  8. What are the two methods of Clusterware communication?
  9. How many nodes can we configure in RAC?
  10. Can you use IPv6 for the private IP?
  11. What are the differences between a single-instance database and RAC?
  12. What files are stored in Clusterware?
  13. Which file systems are certified for RAC?
  14. What protocol is used on the Oracle RAC interconnect network?
  15. What do you need to plan before migrating a single instance to RAC?
  16. How many nodes can we configure in RAC?
  17. Can we use different operating systems when configuring RAC?
  18. From which version is Clusterware fencing available?
  19. What are the two methods of Clusterware communication?
  20. How many IPs are needed for a 2-node RAC according to Oracle best practices?
  21. What is Cache Fusion?
  22. What is GRD?
  23. What is the role of the Voting Disk?
  24. How to check resources in the cluster?
  25. How to check whether the cluster is up on all nodes?
  26. How to check whether the cluster is up on a particular node?
  27. How to bring down one specific node in RAC?
  28. What will you check if a query is running slow?
  29. Difference between bitmap and B-tree indexes?
  30. Tell me some wait events in RAC.
  31. How many disks are needed for ASM normal redundancy?
  32. How to create Data Guard?
  33. How to fix the gap in Data Guard?
  34. Difference between DBFile Sequential and Scattered Reads?
  35. What is Fragmentation in Oracle Database?
  36. What is table fragmentation in Oracle Database?
  37. How to recover a table in an Oracle database?
  38. Can we transfer data from Linux to Windows?
  39. Difference between b-tree and bitmap index?
  40. What is a gc cr request in RAC?
  41. What is a library cache lock? When does it occur?
  42. How do you solve library cache lock issues?
  43. For library cache lock issues, which section of the AWR report will you look into?
  44. For a library cache lock, how does connection management call elapsed time have an impact?
  45. One user fires a SELECT statement on a large table, and after that another user drops the table; what will happen to the SELECT query?
  46. What is a rolling upgrade in Oracle RAC?
  47. How to fix the log switch problem?
  48. What are the Oracle Database 19c new features?
  49. How to check ASM status?
  50. Have you done ASM metadata backup?
  51. What is a SQL baseline?
  52. What are the two source attributes for a SQL baseline?
  53. How to create a SQL baseline?
  54. A table has an index, but the optimizer is not using it; what could be the reason?
  55. What are deadlocks? How are they resolved?
  56. User A starts a transaction, and after that user B starts a transaction on the same row in that table, so a deadlock occurs. Which user's transaction is rolled back? Does the DBA roll it back, or does it happen automatically?
  57. What are locks? Name the types of locks taken on objects.
  58. If a user fires a SELECT statement, will a lock be taken or not?
  59. When a user fires a SELECT statement, how is it executed at the back end?
  60. What is a SQL patch? Why do we use it?


Oracle Database Interview Questions

Oracle Database Interview Questions

  1. What is the section you will look into in the AWR Report to improve the DB performance?
  2. What is the basic parameter you will set for RMAN?
  3. What is the troubleshooting you did in your daily activities?
  4. Why do we do controlfile multiplexing?
  5. How can you replace the optimizer execution plan for your query?
  6. How will you get datafile block corruption information?
  7. For SQL query improvement, what will you do?
  8. For a SQL query problem, which section will you look into?
  9. What is Bind Peeking Variable?
  10. What is Free Buffer Busy Wait?
  11. How to change the DB name?
  12. What are things you look into when you are creating a Test Environment from a Production Environment? (loading production data in a test environment)
  13. How to attach any parameters in the running job if you don’t know ENCRYPTION_PASSWORD?(my dump file has an encryption password)
  14. How to Attaching a parameter to a running Job?
  15. How to Identifying data pump jobs?
  16. How to Refresh Database from PROD to TEST?
  17. What are the types of Hints we can use for Optimizer?
  18. In the Oracle SQL execution plan what columns you will look into?
  19. How to read & understand the EP?
  20. Why do bind variables matter for performance?
  21. If your standby database is lagging relative to the primary database, which areas should you focus on?
  22. How do you determine the transport or apply lag on the standby database?
  23. How do you find the local time on the standby database when the datum used to compute the metric was received (the lag metrics are computed based on data that is periodically received from the primary database)?
  24. How will you find out the history of apply lag values since the standby instance was last started?
  25. How to monitor and assess redo apply performance?
  26. What are the RECOVERY PROCESS WAIT EVENTS in Data Guard?
  27. Which Script creates the standby statspack schema to hold the standby snapshots?
  28. Which script generates the standby statistics report?
  29. Which script is used to purge a set of snapshots in the Data Guard?
  30. Which script drops the stdbyperf user and tables?
  31. Can a logical standby accept archived logs while a rolling upgrade is taking place?
  32. How to determine whether any tables involved in a rolling upgrade performed with the DBMS_ROLLING PL/SQL package contain unsupported data types?
  34. Can we transport redo data to a Recovery Appliance?
  35. Which procedures support rolling upgrades?
  36. Can we rename a data file on the standby site when the STANDBY_FILE_MANAGEMENT initialization parameter is set to AUTO?
  37. If the standby site is not receiving redo data, which view will you check for error messages?
  38. Can we mount the standby database if the standby control file was not created?
  39. If you specify REOPEN for a MANDATORY destination and redo data is not transmitted, what will happen on the primary database?
  40. Which tool(Procedure) you will use for handling logical standby database failures?
  41. If the switchover does not complete successfully, which view you will check?
  42. Can we move an online datafile in a data guard from one physical file to another physical file while the database is actively accessing the file?
  43. Can we create a physical standby of a Multitenant container database(CDB)?
  44. Can we use SYNC & NOAFFIRM ATTRIBUTES use together in maximum availability mode?
  45. Using CURRENT LOGFILE clause is required to start real-time apply in data guard?
  46. Can we do DML operations on global temporary tables on Oracle Active Data Guard standbys?
  47. Can we use sequences in an Oracle Active Data Guard environment?
  48. When you perform a switchover from an Oracle RAC primary database to a physical standby database, is it necessary to shut down all but one primary database instance?
  49. Can we use Application Continuity Feature for Oracle Data Guard switchovers to physical standby databases?
  50. Can we use Application Continuity Feature for fast-start failover to physical standbys in maximum availability data protection mode?
  51. Can we create a logical standby of a CDB?
  52. What is the prerequisite required for data pump export and Import?
  53. If performance is slow in the Data pump, what will you do?
  54. How will you monitor Data Pump activity?
  55. What are the migration's high-level steps in Azure?
  56. What factors should a DBA look into for a migration?
  57. We are using an RMAN backup for migration, but it is taking too much time and we have a 4-hour window to migrate. What will be your approach, and will Data Guard be beneficial for this problem?
  58. How will you improve RMAN performance?
  59. Difference between OEM 12c and 13c?
  60. If we remove a database target from OEM does it remove the database from the host or just monitoring?
  61. How to remove an agent from the host?
  62. How does authentication happen in Oracle Database? Where is it authenticated, in the DB or in the OS?
  63. Tell me about the Oracle architecture. What are an instance and a database?
  64. What is LGWR? How many LGWR processes are there, and can we increase them?
  65. If LGWR is slow, what could be the reason? How will you troubleshoot it, and what solution could you give?
  66. How will you export an 8i database to 12c? In testing it took 2 hours and in production it is now taking 4 hours; what will you check, and how could you improve the performance of this activity?
  67. How will you migrate the database using RMAN? If it is slow, how will you improve the performance?
  68. How will you migrate using Data Pump?
  69. What are the prerequisites required for Data Pump export and import?
  70. If performance is slow in Data Pump, what will you do?
  71. How will you monitor this Data Pump activity?
  72. Data Pump is an optional process in GoldenGate, so why does Oracle recommend using it?
  73. How you will log in to the GG console?
  74. How will you check GG is running on the server or not?
  75. What will you check if a SELECT query is running slow?
  76. You generate an explain plan and find a full table scan; how can you avoid that, and what is your suggestion on this?
  77. What is the difference between a full table scan and an index scan?
  78. Which one is better, a full table scan or an index scan?
  79. What are the db file sequential read and db file scattered read wait events?
  80. If I have to update one column value, what will happen in the background? How many memory components and processes will be involved in this transaction?
  81. Can we increase the DB writer processes and the MAX DB writer processes? What benefit will I get if I increase them?
  82. What are the kernel parameters a DBA will set during database installation?
  83. What is shmmni, shmall & shmmax?
  84. What are the MMAN and MMON processes?
  85. Data got deleted from a table; how will you recover it?
  86. Can we perform table recovery in 11g or not?
  87. The value of the standby management parameter is MANUAL, and activities are going on in production; what will you do to sync the standby?
  88. What is the data guard protection mode? and what are the difference among all?
  89. What are the processes involved in Data guard?
  90. If archives are not being applied on the DG, where will you start troubleshooting, and which dynamic view will you check for errors?
  91. What are the main processes of the golden gate?
  92. What is the difference between exp/imp and expdp/impdp?
  93. What is the difference between delete and truncate command?
  94. What is cache fusion? Which process do cache fusion?
  95. Earlier we had a buffer option to speed up logical backup. Which option do we use now in 11g?
  96. What is the ORA-01555 error?
  97. What is a flashback and what is mandatory for a flashback?
  98. We have two databases, D1 and D2, and the D1 database doesn't have space, so how will you take an export of D1?
  99. You are using Toad and running a procedure, and you get a TNS error; what is the reason?
  100. Where is the location of the RAC CRS cluster alert log?
  101. If the undo file is corrupted what will you do?
  102. How to create ACFs?
  103. What is async noaffirm?
  104. What is the backup location of OCR?
  105. What is the actual size and mirror size in V$ASM_DISKGROUP?
  106. How many IPs are configured in HAIP, why is it used, and can we replace it with something else?
  107. How to check if HAIP is up or down?
  108. Are the public and private networks on the same subnet or on different subnets?
  109. What is block change tracking and when it is used before level 0,level1, or after that?
  110. How to set FRA?
  111. How to set a recycle bin?
  112. How to do schema refresh?
  113. How to check the backup completed or not?
  114. How to check which query is taking a long time?
  115. How to check tablespace is full or not?
  116. How to unlock the schema?
  117. How will you check which patches have been applied?
  118. What is the snapshot too old error?
  119. If we take a full backup on Wednesday and after that take incremental backups on Tuesday and Friday, and on Sunday the database crashes, how will you perform the recovery?
  120. How to add datafile?
  121. How to resize tablespace?
  122. How to resize the logfile member?
  123. What happens if we kill SMON process?
  124. What is the difference between a user and a schema?
  125. What is the difference between an RMAN backup and export/import?
  126. How to check the location of CRSD files?
  127. If we create a new database which parameter is important to check?
  128. What is the process of the startup?
  129. What is the difference between incremental and cumulative backups?
  130. What are hot and cold backups?
  131. How to resize the redo log member?
  132. Tell me the mandatory background process?
  133. We have multiple control files in multiple locations and we lost a control file; how do we recover the control file?
  135. How to give permission to access the database?
  136. What is cloning?
  137. What is Rman cloning?
  138. What is a materialized view?
  139. What is undo management? What happens if we delete some rows and commit? How do we roll back the data?
  140. What is the prerequisite of RAC?
  141. What is cache fusion?
  142. What is the prerequisite of the data guard?
  143. What is Fragmentation in Oracle Database?
  144. What is Table Fragmentation in Oracle Database?
  145. Can we transfer data from Linux to Windows?
  146. Difference between b-tree and bitmap index.
  147. How to troubleshoot a slow running query in Oracle?
  148. What is GC cr request in RAC?
  149. Which BR strategy is good for prod DB?
  150. How will you refresh a 2 TB schema?
  151. How will you ensure your patch is applied successfully?
  152. What are the minimum required parameters in pfile?
  153. What is Rolling Upgrade in Oracle RAC?
  154. How to Fix the log switches problem?
  155. Tell me Oracle Database “19c New Features”?
  156. How to check ASM status?
  157. How will you take an ASM metadata backup?
  158. How many disks in ASM Normal Redundancy?
  159. How to fix the gap in the data guard?
  160. How to create a data guard?
  161. Difference between DBFile Sequential and Scattered Reads?
  162. How to recover Table in oracle database?
  163. If your current redo member got corrupted, will the database hang? If yes, how will you overcome this situation?
  164. If you have increment level 0 backup and you have to restore it on a fresh server, how will you do it?
  165. When do you propose a RAC solution to your business client? Why RAC environment is beneficial over a single instance database?
  166. What is the difference between maximum performance, maximum protection & maximum availability in Data Guard?
  167. What is the Default mode in Data Guard?
  168. What is the Difference between Incremental Differential and Incremental Cumulative backup?
  169. If a LOB object is taking a long time to export, what will be your action?
  170. If node evictions occur, what will be your approach?
  171. How many voting disks are required to set up the RAC?
  172. If you have added datafile in the primary database but it was not added in standby due to space issue, then what will you do to correct it?
  173. If the archiver process is killed, what will be the impact on the database?
  174. Why do you prefer the data pump over exp/imp?
  175. What is the parameter to make data pump operation faster?
  176. What is the parameter used to take consistent backup?
  177. What flashback_scn does?
  178. If you come on shift and have to monitor an ongoing import job, how will you do it?
  179. How do you resume a Data Pump job, and from where does it pick up its information?
  180. Tell me common wait events in the database?
  181. What precautions will you take before upgrading the production database?
  182. If your upgrade fails for some reason and the client wants the database running ASAP, how can you do that?
  183. If archives are not shipping to standby what are the different reasons?
  184. What is the relation between DB time, elapsed time, and CPU time?
  185. What is log file sync?
  186. RAC installation steps?
  187. How to upgrade 11.2.0.1 to 11.2.0.3 database?
  188. How to apply rolling and non-rolling patches and auto patches?
  189. What will happen if grid unlocks?
  190. What are the issues faced in RAC?
  191. Why node eviction will occur?
  192. How to take the backup of OCR File and Voting Disk?
  193. How to run and read the execution plan?
  194. In the case of an OS patch, what will be your tasks as a DBA?
  195. Difference between physical and logical standby?
  196. What is the Functionality of the dg broker?
  197. How do you monitor databases on a daily basis?
  198. What is automatic memory management?
  199. How to rename a datafile?
  200. If I set memory_max_target to 8gb and want to set memory_target to 10gb. How can I do it?
  201. If you have 10GB memory for your instance from which 4GB is allocated to SGA. Now I have made 20GB memory available for your instance and want to change SGA size to 8GB. How can you do it? Will there be any limitations to do this?
  202. How to check free space in ASM disk?
  203. How to take a backup of Voting Disk?
  204. How to check the location of the Voting Disk?
  205. How many IPs you have in 2 node RAC?
  206. What is SCAN?
  207. How the SCAN is useful in 11g?
  208. What is VIP?
  209. What is the use of Public IP, Private IP, VIP & SCAN IP use in RAC?
  210. How the Client gets Connected to Instance?
  211. If you have many instances in the RAC environment, how clients connect to a particular instance?
  212. How to check the index on a particular table?
  213. What is the difference between 11g and 12c RMAN?
  214. If your restore fails for some reason, how can you resume it in RMAN?
  215. What action will you take to avoid an archive destination full issue?
  216. Where are redo logs stored in RAC? Locally or shared storage?
  217. How to identify the master node in RAC?
  218. If one node goes down, then services will failover to which node?
  219. What are OLR and OCR?
  220. Why should we have an odd number of voting disks?
  221. Explain the startup sequence of Clusterware?
  222. What happens in the background during switchover?
  223. What is Dynamic Sampling?
  224. What is the purpose of kernel parameters?
  225. What are the differences between exp/imp and Data Pump?
  226. What is partitioning, and on what basis will you do the partitioning?
  227. Which parameters affect the performance of the DB?
  228. How to increase the restore speed in database recovery?
  229. How to restore crashed undo?
  230. Why the data pump is fast?
  231. What is oracle restart?
  232. If the user has dropped a table, how will you recover it?



Friday, November 6, 2020

Difference Between Oracle GoldenGate & Oracle Data Guard

Difference Between Oracle GoldenGate & Oracle Data Guard






Thursday, October 29, 2020

Oracle SQL Developer Shortcuts

Oracle SQL Developer Shortcuts



  

Saturday, October 17, 2020

Troubleshooting Database Performance Flow

Troubleshooting Database Performance Flow





Friday, October 16, 2020

Administering the DDL Log Files in 12c

Administering the DDL Log Files in 12c

The DDL log is created only if the ENABLE_DDL_LOGGING initialization parameter is set to TRUE. When this parameter is set to FALSE, DDL statements are not included in any log. A subset of executed DDL statements is written to the DDL log.


How to administer the DDL Log?

--> Enable the capture of certain DDL statements to a DDL log file by setting ENABLE_DDL_LOGGING to TRUE.

--> DDL log contains one log record for each DDL statement.

--> Two DDL logs containing the same information:

--> XML DDL log: named log.xml

--> Text DDL log: named ddl_<sid>.log


When ENABLE_DDL_LOGGING is set to true, the following DDL statements are written to the log:

ALTER/CREATE/DROP/TRUNCATE CLUSTER

ALTER/CREATE/DROP FUNCTION

ALTER/CREATE/DROP INDEX

ALTER/CREATE/DROP OUTLINE

ALTER/CREATE/DROP PACKAGE

ALTER/CREATE/DROP PACKAGE BODY

ALTER/CREATE/DROP PROCEDURE

ALTER/CREATE/DROP PROFILE

ALTER/CREATE/DROP SEQUENCE

CREATE/DROP SYNONYM

ALTER/CREATE/DROP/RENAME/TRUNCATE TABLE

ALTER/CREATE/DROP TRIGGER

ALTER/CREATE/DROP TYPE

ALTER/CREATE/DROP TYPE BODY

DROP USER

ALTER/CREATE/DROP VIEW


Example

$ more ddl_orcl.log

Thu Nov 15 08:35:47 2012

diag_adl:drop user app_user


Locate the DDL Log File

$ pwd

/u01/app/oracle/diag/rdbms/orcl/orcl/log

$ ls

ddl ddl_orcl.log debug test

$ cd ddl

$ ls

log.xml


Notes: 

- Setting the ENABLE_DDL_LOGGING parameter to TRUE requires licensing the Database Lifecycle Management Pack.

- This parameter is dynamic and you can turn it on/off on the go.

- alter system set ENABLE_DDL_LOGGING=true/false;
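
A minimal sketch of toggling the parameter and locating the log, using a hypothetical table name t_ddl_demo:

SQL> alter system set enable_ddl_logging=true;
SQL> create table t_ddl_demo (id number);
SQL> drop table t_ddl_demo;
SQL> select value from v$diag_info where name = 'ADR Home';

The CREATE and DROP statements should then appear in ddl_<sid>.log under the <ADR Home>/log directory shown in the example above.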


How to set oracle path in Linux

How to set oracle path in Linux 


[oracle@test04 ~]$vi MQTEST.env 

export ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1

export PATH=$ORACLE_HOME/bin:$PATH:.

export ORACLE_SID=MQTEST

ALERT=/u01/app/oracle/diag/rdbms/mqprod/MQTEST/trace

:wq!
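
To load these settings into the current shell, source the file (a quick sketch using the file created above):

[oracle@test04 ~]$ . ./MQTEST.env

[oracle@test04 ~]$ echo $ORACLE_SID
MQTEST

[oracle@test04 ~]$ sqlplus / as sysdba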

Different types of Shared Memory available in Solaris

Different types of Shared Memory available in Solaris


1) Shared Memory (SHM)

-basic anonymous shared memory

-also called Pageable Shared Memory

-pages can be swapped out during memory shortages which affects the application performance


2) Intimate Shared Memory (ISM)

-is used by default in Solaris

-cannot be dynamically resized

-if you want to modify (e.g. resize) a segment you must restart the processes

-does not need swap space

-segments are automatically locked by the Solaris kernel


3) Dynamic Intimate Shared Memory (DISM)

-allows shared memory segments to be adjusted dynamically (resized or relocated) (e.g. SGA can be resized) - no restart of the processes necessary

-shared memory segments can be locked by the applications by using mlock(3C)

-kernel virtual-to-physical memory address translation structures are shared between processes that attach to the DISM segment, saving kernel memory and CPU time

-needs swap space as it makes reservations on it


4) Optimized Shared Memory (OSM)

-the most recommended shared memory

-provides similar benefits as DISM but without some of the requirements of DISM


Reference metalink Doc ID 1010818.1


ggsci fails with error 'symbol lookup error' on new installation

ggsci fails with error 'symbol lookup error' on new installation


Symptoms:

After installing OGG 12.3.0.2, invoking ggsci gets the below error:

 ./ggsci: symbol lookup error: /optware/software/gg12_3/libggparam.so: undefined symbol:_ZZ11ggGetConfigE4pcfg

 

Cause:

OGG version 12.3 is not compatible with Linux 5.

 ++ Certification matrix link

https://mosemp.us.oracle.com/epmos/faces/CertifyResults?searchCtx=st%5EANY%7Cpa%5Epi%5E922_Oracle+GoldenGate%7Evi%5E435776%7Epln%5EAny%7E%7C&_afrLoop=246908612827636


Solution:

OGG 12.3 is not compatible with Linux 5.

To confirm, check OS version

> uname -a

You may use the OGG 12.2 versions, which support Linux 5.


Reference metalink Doc ID 2352254.1

./ggsci: error while loading shared libraries: libnnz12.so: cannot open shared object file: No such file or directory

./ggsci: error while loading shared libraries: libnnz12.so: cannot open shared object file: No such file or directory


Error:

[oracle@test04 oracle]$ ./ggsci

./ggsci: error while loading shared libraries: libnnz12.so: cannot open shared object file: No such file or directory


Solution:

[oracle@test04 oracle]$ echo $ORACLE_HOME

/u01/oracle/product/12.2.0/db_1

[oracle@test04 oracle]$ export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib


[oracle@test04 oracle]$ ./ggsci

Oracle GoldenGate Command Interpreter for Oracle

Version 12.1.2.0.0 17185003 OGGCORE_12.1.2.0.0_PLATFORMS_130924.1316_FBO

Linux, x64, 64bit (optimized), Oracle 12c on Sep 25 2013 02:33:54

Operating system character set identified as UTF-8.

Copyright (C) 1995, 2013, Oracle and/or its affiliates. All rights reserved.

GGSCI (test04.oracle.com) 1> 

Tuesday, October 13, 2020

Oracle SOA Suite Backup & Recovery

Oracle SOA Suite Backup & Recovery


Performing a  Full Offline Backup

To perform a full offline backup, you copy the directories that contain Oracle Fusion Middleware files. Archive and compress the source Middleware home using your preferred archiving tool. Ensure that the tool you are using preserves the permissions of the files. For example, for online backups on Windows, use copy; for offline backups on Windows, use copy, xcopy, or jar. Do not use WinZip because it does not work with long filenames or extensions.

For example, for Linux and UNIX, use tar.


1. Stop all the processes.

2. Back up the Middleware home (MW_HOME) on all hosts. For example:
tar -cf mw_home_backup.tar $MW_HOME/*

3. If the domain is not located within the Middleware home, back up the Administration Server domain separately. For Example
tar -cf domain_home_backup.tar $MW_HOME/user_projects/domains/domain_name/*

In most cases, you do not need to back up the Managed Server domain directories separately, because the Administration Server domain contains information about the Managed Servers in its domain.

4. If the Oracle instance home is not located within the Middleware home, back up the Oracle instance home. The Oracle instance home contains configuration information about system components, such as Oracle HTTP Server or Oracle Internet Directory.

For Example:
tar -cf sc_home_backup.tar $ORACLE_INSTANCE/*

5. If a Managed Server is not located within the domain, back up the Managed Server directory. 
For example:
tar -cf mg1_home_backup.tar
$MW_HOME/user_projects/domains/domain_name/servers/server_name/*

6. Back up the OraInventory directory. For example:
tar -cf Inven_home_backup.tar /u01/app/oracle/OraInventory

7. Back up OraInst.loc and oratab files, which are located in the /etc directory.

8. Back up the database repositories using the Oracle Recovery Manager (RMAN).

Note: This completes the backup of Oracle SOA Suite.


Recovery Of Oracle SOA Suite

Note: Stop all the relevant processes that are running before recovery.

Recovering a Middleware Home
1. Recover the Middleware home directory from backup. For example:
cd $MW_HOME
tar -xf mw_home_backup.tar

Recovering an Oracle WebLogic Server Domain
1. Recover the domain directory from backup:
cd DOMAIN_HOME
tar -xf domain_backup.tar
Note: If you want to Recover only the Administration Server configuration then recover the domain home backup to a temporary location. Then, restore the config directory to the $DOMAIN_HOME/config location.


Recovering an Oracle Instance Home

1. Recover the Oracle instance home directory from a backup file. For example:
cd ORACLE_INSTANCE
tar -xf sc_home_backup.tar

Note: If you are recovering an Oracle instance home that was deregistered from the domain then also register the Oracle Home as shown below.

opmnctl registerinstance -adminHost admin_server_host -adminPort admin_server_port -adminUsername username -adminPassword password -oracleInstance ORACLE_INSTANCE_dir -oracleHome ORACLE_HOME_dir -instanceName Instance_name -wlserverHome Middleware_Home

Recovering A Managed Server

RECOVERING A MANAGED SERVER WHEN IT CAN'T BE STARTED

In this scenario, the Managed Server does not operate properly or cannot be started because the configuration has been deleted or corrupted or the configuration was mistakenly changed and you cannot ascertain what was changed.

1. Recover the Middleware home from the backup, if required.
tar -xf mw_home_backup.tar

2. Create a domain template jar file for the Administration Server, using the pack utility. For
example:
pack.sh -domain=$MW_HOME/user_projects/domains/domain_name -template=/tmp/temp.jar -template_name=test_install -template_author=myname -log=/tmp/logs/my.log -managed=true

Note: Specifying the -managed=true option packs up only the Managed Servers. If you want to pack the entire domain, omit this option.

3. Unpack the domain template jar file, using the unpack utility.

unpack.sh -template=/tmp/temp.jar -domain=$MW_HOME/user_projects/domains/domain_name -log=/tmp/logs/new.log

4. Start the Managed Server and Managed Server connects to the Administration Server and updates its configuration changes.

RECOVERING A MANAGED SERVER THAT HAS A SEPARATE DIRECTORY

When Oracle SOA Suite is configured in a domain and no Managed Servers share the domain directory with the Administration Server, you must restore the Managed Server directory. For example, a domain contains two Managed Servers, one of which contains Oracle SOA Suite, but neither of the Managed Server's directories are in the same directory structure as the Administration Server.

1. Restore the Managed Server from backup:
cd ManagedServer_Home
tar -xf managed_server_backup.tar

2. Restart the Managed Server.
Note: This hands-on completes the backup and recovery of Oracle SOA Suite.


Oracle WebLogic Backup & Recovery

Oracle WebLogic Backup & Recovery


What is a Backup ?

Process of copying or archiving data so that it can be restored in the event of a failure.

Things to backup

- Database Tier

- Application Tier

- Connector Server

- Operating System Files


Type of Backups in WebLogic ?

- Offline as Cold Backup

- Online as Hot Backup (For DB , enable archive log)

- Size of Backup

- Full

- Incremental

- Time of backup

- After big change or patch

- Daily, Weekly, Monthly, Yearly


What is Offline & Online Backups ?

Offline Backup

- Environment is down and service is not available to user before backing up files.

- Mainly done after big patches or upgrades


Online Backup

- Environment is available during the backup

- Database must be in archive log mode for online backup

- Generally : Weekly full and incremental daily


What is Full VS Partial ?

Full Backup:

- All directories of Weblogic Server


Partial Backup:

- Few files before manual change

- LDAP backup of subset of OID entity


Backup Considerations in Weblogic ?

- Backup before any big change

- Ensure database is in archive log mode and you can recover point in time

- Take backup before & after any patch

- Take regular backup of Domain & after config change


WebLogic Directory/Component to Backup ?

- Middleware Home

- Instance Home for System Components

- Domain Home for Java Components

- Cluster folder if outside DOMAIN_HOME

- oraInventory

- $HOME/beahomelist

- /etc (Linux, IBM AIX), /var/opt/oracle (Other Unix)

- Windows (Registry)

- HKEY_LOCAL_MACHINE\Software\oracle

- HKEY_LOCAL_MACHINE\System\CurrentControlSet\Services

- Oracle Database (RMAN or Manual)


What are Backup Tools ?

- For Application Tier

- O.S. Copy commands like cp, copy, xcopy, tar, jar

- Take care of

- Symbolic Links if any

- Supports long file names

- Preserve permissions and ownership

- For Database Tier

- Oracle Home (O.S. Commands)

- Oracle Database (RMAN, Manual)


What is App Tier Backup?

- Directory to Backup

- Individual Files

- Domain Home (regular)

- MW_HOME (after patching)

- Tools for Backup

- cp -r, tar (Unix Utility)

- Backup tools


What is DB Tier Backup?

- Ask DBAs to backup

- Tools for Backup

- RMAN

- Copy data files in hot backup


Recommended Backup Strategy For WebLogic

- Perform a full offline backup

- Perform an online backup of run-time artifacts

- Perform a full or incremental backup of your databases


What is Recovery ?

- Process of restoring from backup or archiving

- Recover the entire OIM or just the files that need recovery

- Only offline recovery is possible

- Ensure that the file system and database are in sync, if patches applied on FMW updated the DB too

- If restoring from a hot backup on the apps tier, there could be lock and pid files, so remove them before starting services


What is App Tier Restore ?

- Directory to Restore

- Individual Files

- Domain Home

- MW_HOME

- Tools for Restore

- cp -r, untar (Unix Utility)

- Other Restore tools


What is DB Tier Restore ?

- Ask DBAs to restore

- Tools for Backup

- RMAN RESTORE

- Manual point in time recovery


WebLogic Backup Recommendations

- Before and after making configuration changes to a component or cluster

- Backup: All of the runtime backup artifacts

- Mode: Online


- Prior to deploying a custom pure Java EE application to a Managed Server or cluster

- Backup: All of the runtime backup artifacts

- Mode: Online


- After any major architectural changes to deployment architecture (such as scale out, creation of servers, or creation of clusters)

- Backup: All of the runtime backup artifacts

- Mode: Online


- Before and after product binary files (such as the WebLogic Home) are patched or upgraded

- Backup: All of the backup artifacts

- Mode: Offline


- Before and after patching or upgrading (which may impact BEA home and database)

- Backup: All of the backup artifacts

- Mode: Offline





Creating a Services In Oracle RAC

Creating a Services In Oracle RAC


To check the configuration:

srvctl config database -d RUMPROD


To create a new service:

srvctl add service -d RUMPROD -s QSHO -r RUMPROD1,RUMPROD2


To start a service on one node:

srvctl start service -d RUMPROD -s QRUM -n test02 -o open


To start a service on all nodes:

srvctl start service -d RUMPROD -s QSHO -o open


To stop service in one node/ instance:

srvctl stop service -d RUMPROD -s QSHO -n test01


To stop service in all nodes/ instances:

srvctl stop service -d RUMPROD -s QSHO


To remove a service:

Oracle best practice is to first stop the service, then drop it.

srvctl stop service -d RUMPROD -s QSHO

srvctl remove service -d RUMPROD -s QSHO


To check status of database and list services:

srvctl status database -d db_unique_name -v

Or using View :

SQL> select * from gv$active_services;
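
You can also check an individual service directly with srvctl (a brief sketch using the same database and service names as above):

srvctl status service -d RUMPROD -s QSHO

srvctl config service -d RUMPROD -s QSHO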






Saturday, October 10, 2020

Soft Parse Analysis in AWR

Soft Parse Analysis in AWR


Before: 









After:




Note:

Ideally the "soft parse hit ratio" should be around 99%; at a minimum, Oracle recommends targeting at least 90%.
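
Outside of AWR, a quick way to check the current soft parse ratio is to compare the parse statistics in v$sysstat (a minimal sketch):

select round((1 - hard.value / total.value) * 100, 2) "Soft Parse %"
from v$sysstat total, v$sysstat hard
where total.name = 'parse count (total)'
and hard.name = 'parse count (hard)';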

Friday, October 9, 2020

Network Wait: SQL*Net more data from client in AWR report

Network Wait: SQL*Net more data from client in AWR report







Resize Operation Completed For File#

Resize Operation Completed For File# 


Symptoms:

File Extension Messages are seen in alert log.There was no explicit file resize DDL as well.

Resize operation completed for file# 45, old size 26M, new size 28M

Resize operation completed for file# 45, old size 28M, new size 30M

Resize operation completed for file# 45, old size 30M, new size 32M

Resize operation completed for file# 36, old size 24M, new size 26M


Changes:

NONE


Cause:

These file extension messages are the result of a diagnostic enhancement, introduced through an unpublished bug, to record automatic datafile resize operations in the alert log with a message of the form:

"File NN has auto extended from x bytes to y bytes"

This can be useful when diagnosing problems which may be impacted by a file resize. 


Solution:

In busy systems, the alert log could be completely flooded with file extension messages. A new hidden parameter, "_disable_file_resize_logging", has been introduced through bug 18603375 to stop these messages from being logged in the alert log.

(Unpublished) Bug 18603375 - EXCESSIVE FILE EXTENSION MESSAGE IN ALERT LOG 

Set the below parameter along with the fix.

SQL> alter system set "_disable_file_resize_logging"=TRUE ; (Its default value is FALSE)

The bug fix 18603375 is included in 12.1.0.2 onwards.


Reference metalink Doc ID 1982901.1

Monday, October 5, 2020

Starting and Stopping Pluggable Databases In 12c

Starting and Stopping Pluggable Databases In 12c


Starting the pdbs:

Check the status of the pdb's

sqlplus '/ as sysdba'

SQL> select name, open_mode from v$pdbs;

SQL> show pdbs

Note: All the PDBs are in MOUNT state.


Start a single pdb

show pdbs

alter pluggable database lub1 open;


Start all pdb's in single command

show pluggable

alter pluggable database all open;

Note: While starting the PDBs, the database character set and pluggable database status information will be written to the alert log.


Stopping the pdbs:

Check the status of the pdb's

sqlplus '/ as sysdba'

SQL> select name, open_mode from v$pdbs;

SQL> show pdbs

Note: All the PDBs are in READ WRITE state.


Stop a single pdb

show pdbs

alter pluggable database lub1 close immediate;


Stop all pdb's in single command

show pluggable

alter pluggable database all close immediate;

Note: While closing the PDBs, the buffer cache will be flushed.


Create a trigger to open all pluggable databases automatically at instance startup.

sqlplus '/ as sysdba'

CREATE OR REPLACE TRIGGER pdb_startup AFTER STARTUP ON DATABASE

BEGIN

EXECUTE IMMEDIATE 'alter pluggable database all open';

END pdb_startup;

/
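
Note: In 12.1.0.2 and later, the same effect can be achieved without a trigger by saving the PDB open state, for example:

alter pluggable database all open;

alter pluggable database all save state;

On the next instance startup, the PDBs return automatically to the saved (OPEN) state.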







Increasing Load On The Database Server

Increasing Load On The Database Server


Creating a table:

create table t (id number, sometext varchar2(50),my_date date) tablespace test;

Now we will create a simple procedure to load bulk data:

create or replace procedure manyinserts as

v_m number;

begin

for i in 1..10000000 loop

select round(dbms_random.value() * 44444444444) + 1 into v_m from dual ;

insert /*+ new2 */ into t values (v_m, 'DOES THIS'||dbms_random.value(),sysdate);

commit;

end loop;

end;

/

Now this insert will be executed in 10 parallel sessions using dbms_job; this will artificially increase the load on the database:

create or replace procedure manysessions as

v_jobno number:=0;

begin

FOR i in 1..10 LOOP

dbms_job.submit(v_jobno,'manyinserts;', sysdate);

END LOOP;

commit;

end;

/

Now we will execute manysessions which will increase 10 parallel sessions:

exec manysessions;


Check the table size:

select bytes/1024/1024/1024  from dba_segments where segment_name='T';
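
To confirm that the ten job sessions are actually running, a quick check against the standard job views (a sketch):

select job, what, this_date from dba_jobs where what like 'manyinserts%';

select count(*) from dba_jobs_running;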

ORA-01940: Cannot Drop A User that is Currently Connected

ORA-01940: Cannot Drop A User that is Currently Connected


While dropping a user in an Oracle database, you may face the below ORA-01940 error.

Problem:

SQL> drop user SCOTT cascade

2 /

drop user SCOTT cascade

*

ERROR at line 1:

ORA-01940: cannot drop a user that is currently connected


Solution:

1. Find the sessions running from this userid:

SQL> SELECT SID,SERIAL#,STATUS from v$session where username='SCOTT';

SID SERIAL# STATUS

---------- ---------- --------

44 56381 INACTIVE

323 22973 INACTIVE

2. Kill the sessions:

SQL> ALTER SYSTEM KILL SESSION '44,56381' immediate;

System altered.

SQL> ALTER SYSTEM KILL SESSION '323,22973' immediate;

System altered.

SQL> SELECT SID,SERIAL#,STATUS from v$session where username='SCOTT';

no rows selected

3. Now the drop the user:

SQL> drop user SCOTT cascade;

User dropped.
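
When the user has many sessions, you can generate the kill statements instead of typing them one by one (a sketch; review the generated commands before running them):

SQL> select 'ALTER SYSTEM KILL SESSION '''||sid||','||serial#||''' IMMEDIATE;' kill_cmd from v$session where username='SCOTT';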


Saturday, October 3, 2020

Parsing In Oracle

Parsing In Oracle

Parsing is the primary phase in SQL execution. It is followed by the other stages: Execute and Fetch.


PARSING BASICS:

Whenever a SQL statement is executed, Oracle Engine performs below actions on it:

-Validate the Syntax

-Validate the objects being referenced in the statement

-Validate the privileges assigned to the user executing the statement

-Verify whether the statement is already available in the shared pool; the Oracle engine calculates a hash value for the SQL statement and looks it up in the shared pool (library cache)

-If the statement is not already present, allocate shared memory and create a cursor in the shared pool

-Generate the Execution Plan


TYPES OF PARSES

-HARD parse: It means that the statement is not available in shared memory, or it is a brand-new statement that the user is trying to execute. If your shared pool is small, a hard parse may also be required because the old statement got aged out of the shared pool. All the steps mentioned above for parsing need to be done in this situation. A hard parse requires extra system resources. This is also known as a 'Library Cache Miss'.

-SOFT Parse: It means that the statement was executed earlier, was already parsed, and is available in memory. So Oracle needs to do only the first three steps mentioned above, since the rest of the tasks were already done earlier. It is like working hard once and reaping the benefits multiple times. This is also known as a 'Library Cache Hit', as the parsed statement is already available for use in the library cache.

 

Why hard parses should be avoided:

There are two key reasons why hard parses should be kept to bare minimum required:

-Generation of an execution plan is a very CPU-intensive operation.

-Memory in the shared pool is limited, and memory operations are serialized. Memory operations happen under shared pool latches, and if many hard parses are happening, other processes in the database have to wait in a queue to get the shared pool latch. So hard parsing puts pressure on both the shared pool latch and the library cache latch.
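
A small illustration of the difference, assuming a hypothetical table emp with a numeric empno column: statements that differ only in literal values are each hard parsed, while a bind variable lets the same cursor be reused (soft parse):

-- literals: each distinct value is a new statement text, so a hard parse every time
select * from emp where empno = 7369;
select * from emp where empno = 7499;

-- bind variable: one shared cursor, later executions are soft parsed
variable n number
exec :n := 7369
select * from emp where empno = :n;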

Analyzing Basic Performance Issues

Analyzing Basic Performance Issues 


Whenever there is a performance issue, it is important that DBAs and developers work together on the database/application issue.

When a performance issue is reported to DBAs, the first step is to get as much information as possible related to the issue.


You can ask the below questions to users/developers to collect the first level of information:

 1. What operations/program are executed?

 2. Is it Oracle seeded or custom program?

 3. How much time did it take earlier?

 4. Has the run time increased gradually over time, or are you seeing a sudden increase in run time?

 5. Was there any recent code change/migration?

 6. Is it always slow or for certain time of the day only?

 7. Is it slow for all parameters or for some specific parameters?

 8. How much data is getting processed?

 9. What is the frequency of job execution? Was there any change in frequency?

 10. Does the problem happen on both the test and production systems?


Asking these kinds of questions will help you decide what part of the system you should target:

-Target the whole system

-Target a single session

-Target a single SQL statement

 

Your goal should be to answer below three questions:

Where is time spent?

You need to identify where the time is getting consumed. Is it the code, network latency, disk I/O bottlenecks, or a CPU/memory crunch?

How is time spent?

You need to find out how the time is spent and how much time is spent in each layer: database, application, network, disk, etc.

How to reduce the time spent?

Finally, based on above information see where the major chunk of time is spent and how to reduce it.


 --> High level steps for System level performance issues

Use vmstat top/prstat to identify system wide CPU/Memory consumption.

Use iostat to verify if disks are the bottleneck

Use netstat/tnsping etc. to verify whether the network is the issue.

Verify if any other resource intensive processes are running on server.

Verify filesystem space.

Check alert logs, application logs, traces etc.

Check database locks

Generate AWR reports to see what is eating up resources.

Check if increasing application/database memory/redo/undo/temp/SGA will help.

 

--> High level steps for Session level performance issues

Find the top SQLs executing under the session (see the sketch after this list).

Apply SQL optimization techniques on top SQLs.

Verify locking at session level

Generating AWR/ASH for that duration may help in providing useful information.
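
For example, to see the current SQL and wait event for a given session (a sketch; &sid is the session ID being investigated):

select s.sid, s.sql_id, s.event, s.last_call_et, q.sql_text
from v$session s, v$sql q
where q.sql_id = s.sql_id
and q.child_number = s.sql_child_number
and s.sid = &sid;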

 

--> High level steps for SQL level performance issues

Apply trace, generate a TKPROF report, and analyze the top resource-consuming SQL.

Avoid full table scans on large tables

Possible indexes on columns contained in the WHERE clause

Use AWR/ASH reports to get collective information

Also use SQLTRPT/SQLT

Verify that statistics are current

Verify if indexes are valid and are adequate.

Verify if parallelism/materialized views/baselines/SQL profiles will help

Monitor V$SESSION_LONGOPS to detect long running operations

Decide on using Hints if those are helping.

Table partitioning can be thought of as an option based on kind and size of tables. 

Saturday, September 26, 2020

ASH (Active Session History) Analysis

How To Generate ASH (Active Session History) Report


To generate ASH report:

@$ORACLE_HOME/rdbms/admin/ashrpt.sql

The report provides below areas:

1. Top User Events


2. Top Service/Module


3. Top SQL Command Types



4. Top Sessions



5. Top Blocking Sessions



6. Top DB Objects


7. Top Phases of Execution


8. Top PL/SQL Procedures


9. Top SQL With Top Row Sources


11. Complete list of SQL text


12. Activity Over Time




How To Generate Explain Plan In Oracle

How To Generate Explain Plan In Oracle


1. Generating an explain plan for a SQL query:

We will generate the explain plan for the query "select * from test.qader_t1;"

LOADING THE EXPLAIN PLAN TO PLAN_TABLE

SQL> explain plan for select * from test.qader_t1;

 DISPLAYING THE EXPLAIN PLAN

SQL> select * from table(dbms_xplan.display);


2. Explain plan for a sql_id from cursor

set lines 2000

set pagesize 2000

SELECT * FROM table(DBMS_XPLAN.DISPLAY_CURSOR('&sql_id'));


3. Explain plan of a sql_id from AWR:

SELECT * FROM table(DBMS_XPLAN.DISPLAY_AWR('&sql_id'));

The above will display the explain plan for every plan_hash_value of that SQL ID in AWR. If you wish to see the plan for a particular plan_hash_value:

SELECT * FROM table(DBMS_XPLAN.DISPLAY_AWR('&sql_id',&plan_hash_value));
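
To compare estimated and actual row counts for the last statement executed in your session, you can combine the gather_plan_statistics hint with DISPLAY_CURSOR (a sketch using the same demo table as above):

SQL> select /*+ gather_plan_statistics */ * from test.qader_t1;

SQL> select * from table(dbms_xplan.display_cursor(null, null, 'ALLSTATS LAST'));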

 


Friday, September 25, 2020

How to Stop and Start Oracle RAC Services on Single Node

How to Stop and Start Oracle RAC Services on Single Node


1) Disable the automatic startup of the Oracle Clusterware software:

crsctl disable has


2) Stop the Oracle clusterware stack:

crsctl stop has


3) Once the maintenance is complete, re-enable the automatic startup of the Oracle Clusterware software:

crsctl enable has


4) Start the Oracle Clusterware (HAS) stack:

crsctl start has


Check the status:

ps -ef|grep d.bin|wc -l

ps -ef|grep pmon


5) Start the Oracle clusterware stack:

crsctl start crs


6)Check the status of Oracle Clusterware Resources:

crsctl stat res -t
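
You can also verify the overall stack health with the standard check commands (a brief sketch):

crsctl check crs

crsctl check cluster -all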


Long Running Sessions in Oracle

Long Running Sessions in Oracle


SELECT SID, SERIAL#,OPNAME, CONTEXT, SOFAR, TOTALWORK,ROUND(SOFAR/TOTALWORK*100,2) "%_COMPLETE" FROM V$SESSION_LONGOPS WHERE OPNAME NOT LIKE '%aggregate%' AND TOTALWORK != 0 AND SOFAR <> TOTALWORK;


(or)

set lines 300

col TARGET for a40

col SQL_ID for a20

select SID,TARGET||OPNAME TARGET, TOTALWORK, SOFAR,TIME_REMAINING/60 Mins_Remaining,ELAPSED_SECONDS,SQL_ID from v$session_longops where TIME_REMAINING>0 order by TIME_REMAINING; 


Above output you can further check the sql_id, sql_text and the wait event for which query is waiting


TO find out sql_id for the above sid:

SQL> select sql_id from v$session where sid='&SID';


To find sql text for the above sql_id:

SQL> select sql_fulltext from v$sql where sql_id='&sql_id';


To find wait event of the query for which it is waiting for:

SQL>select sql_id, state, last_call_et, event, program, osuser from v$session where sql_id='&sql_id';

Friday, September 11, 2020

Flashback Query (AS OF) in Oracle Database

Flashback Query (AS OF) in Oracle Database


Create a test table with no data and check the current SCN and time.

create table flashback_mqm_test (id  NUMBER(10));

SQL> SELECT current_scn, TO_CHAR(SYSTIMESTAMP, 'YYYY-MM-DD HH24:MI:SS') FROM v$database;

CURRENT_SCN TO_CHAR(SYSTIMESTAM

----------- -------------------

584777 2020-09-11 23:55:06


Add a row to the test table and commit the change.

INSERT INTO flashback_mqm_test (id) VALUES (1);

COMMIT;


If we check the contents of the table, we can see there is a single row.

SELECT COUNT(*) FROM flashback_mqm_test;

  COUNT(*)

----------

         1

The following two examples use the AS OF clause to query the table using the timestamp and SCN we captured before the row was inserted.

select count(*) from flashback_mqm_test AS OF TIMESTAMP TO_TIMESTAMP('2020-09-11 23:55:06', 'YYYY-MM-DD HH24:MI:SS');

  COUNT(*)

----------

         0

select count(*) from flashback_mqm_test AS OF SCN 584777;

  COUNT(*)

----------

         0

As we can see, the data in the table reflect the point in time specified in the AS OF clause.

There are a few things to consider when using flashback query.

-Flashback query is based on UNDO. As a result, the amount of time you can flashback is dependent on how long undo information is retained, as specified by the UNDO_RETENTION parameter.

-The flashback operation is performed by winding back the data using the undo information, like a consistent get. The time it takes to wind back the data depends on the number of changes that have happened, so flashback queries of volatile data that go back a long time can be slow.

-Flashback Data Archive (FDA) was introduced in Oracle 11g to allow you to protect UNDO information, allowing guaranteed flashback queries.

-Each table in a query can reference a different point in time.

-The AS OF clause can be included in DML and DDL statements.
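
For example, rows deleted by mistake can be re-inserted with an AS OF subquery inside DML (a sketch reusing the table and SCN captured above; in this walkthrough the table was still empty at that SCN, so the statement only illustrates the pattern):

insert into flashback_mqm_test
select * from flashback_mqm_test as of scn 584777
minus
select * from flashback_mqm_test;

commit;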