# Copyright (c) 1996, 2011, Oracle and/or its affiliates. All rights reserved.
#
## ****************************************************************************
## *  RECOVER.BSQ is now a DERIVED OBJECT.  If you are editing               *
## *  .../rdbms/admin/recover.bsq, STOP and edit                             *
## *  .../rdbms/src/client/tools/rcvman/recover.txt instead.  recover.bsq is *
## *  derived from recover.txt, and any changes made directly to recover.bsq *
## *  will be lost.                                                          *
## ****************************************************************************
#
# When adding new libunits that are not related to the CREATE/DROP/UPGRADE
# CATALOG commands, search for STOPSTOP and add your libunits before that
# marker.  Because the PL/SQL packages used by CREATE/UPGRADE CATALOG are so
# large, and those commands are seldom used, we don't load their libunits
# until those commands are executed.  So, if libunits for other commands are
# added after the STOPSTOP marker, they will not be loaded during normal RMAN
# initialization.
#
# MODIFIED   (MM/DD/YY)
#  banand    07/29/11 - Backport banand_bug-11872103 from main
#  swerthei  06/13/11 - Backport swerthei_bug-12400752 from main
#  banand    02/14/11 - Backport banand_bug-9971106 from main
#  debjroy   02/08/11 - Backport debjroy_bug-9896278 from st_rdbms_11.2.0
#  fsanchez  02/07/11 - Backport fsanchez_bug-7293136 from main
#  raguzman  01/27/11 - Backport raguzman_bug-10388660 from main
#  raguzman  01/14/11 - add CATALOG CLONE START WITH skeleton
#  molagapp  11/24/10 - Backport 10315698: basebug 10264638
#  banand    11/23/10 - Backport banand_bug-10157249 from main
#  jkrismer  11/23/10 - backport bug-9670439 from main
#  fsanchez  11/22/10 - Backport fsanchez_bug-10099320 from main
#  molagapp  11/29/10 - bump up version to 11.2.0.3
#  molagapp  04/19/10 - increase bsRecCache max limit to 32k
#  fsanchez  04/12/10 - bug 9550354
#  banand    03/08/10 - bug 5408094
#  jkrismer  02/24/10 - bug 9363515 resync hangs due to 0 PGA Target mem size
#  fsanchez  01/23/10 - bug 7500916
#  fsanchez  01/11/10 - bug 9044053
#  fsanchez  11/18/09 - remove unused code
#  banand    10/09/09 - bug 8928926 - automatic primary resync at standby
#  fsanchez  10/17/09 - bug 9018066
#  banand    10/07/09 - bug 8819683
#  molagapp  07/15/09 - bump up version to 11.2.0.2
#  raguzman  05/27/09 - bug-8482844 getIncrementalSCN keep default
#  jciminsk  04/29/09 - Version to 11.2.0.1
#  molagapp  03/05/09 - add NOLOGGING corruption type: bug-7396077
#  molagapp  03/27/09 - bug 5739423
#  fsanchez  02/18/09 - bug 4090316
#  molagapp  03/17/09 - bug-8324589
#  banand    02/28/09 - bug 7433077 - drop index when dropping named constr
#  molagapp  02/24/09 - bug-7705613
#  molagapp  02/15/09 - bug 8255745
#  molagapp  02/20/09 - bug 7595777
#  molagapp  02/09/09 - bug-8239313
#  raguzman  02/10/09 - findSpfileBackup needs to retain 10.2 signature
#  jkrismer  02/06/09 - bug 7475616 db_key/db_name null in RC_*_DETAILS
#  banand    02/03/09 - bug 7709787
#  molagapp  02/05/09 - bug 7572548
#  raguzman  12/09/08 - Add SPFILE RESET parm
#  fsanchez  09/24/08 - Bug 7609598
#  fsanchez  11/26/08 - bug 7597947
#  swerthei  10/03/08 - bug 7119336 - parallelize backup backupset
#  banand    09/19/08 - bug 7412089
#  molagapp  10/08/08 - bug-5554609
#  molagapp  10/02/08 - bug 7450366
#  fsanchez  09/24/08 - bug 7418443
#  banand    09/19/08 - bug 7412089
#  banand    09/02/08 - bug 7323266
#  fsanchez  08/21/08 - bug 6623413
#  rlong     08/07/08 -
#  fsanchez  07/01/08 - Resumable duplicate
#  banand    07/08/08 - bug-7117200
#  wfisher   06/25/08 - Bug 4641810: Support global scripts in rc views
#  fsanchez  06/18/08 - bug 7188363
#  banand    06/17/08 -
bug 6745130: detect half upgraded catalog # fsanchez 05/12/08 - bug 7036671 # banand 05/14/08 - bug 6929736, remove duplicate BSF and BDF entries # fsanchez 04/03/08 - DO not need to online for tspitr # jkrismer 04/23/08 - bug-6965089 fix 10.x catalog upgrade site_key # molagapp 11/07/07 - bug-6616834 # fsanchez 03/17/08 - Change FILE -> TARGETFILE # fsanchez 05/16/06 - project 2042 # banand 02/28/08 - bug-6198368: iso format to avoid dep on NLS_DATE_FORMAT # banand 01/26/08 - bug 6750214:df.rfile# and df.create_time can be null # banand 01/11/08 - lrg 3254831, caused by bug 6653570 # molagapp 01/09/08 - pass arguments by name to networkFileTransfer # molagapp 10/25/07 - add snapshot_cf to backupControlfile # weiwei 12/27/07 - bug 6718484 add index conf_i_db # wfisher 12/26/07 - Fix hdl_isdisk for validatebackuppiece # banand 12/12/07 - bug-6653570 # raguzman 11/28/07 - setUntilSCN rpoint param # molagapp 10/25/07 - add snapshot_cf to backupControlfile # molagapp 11/16/07 - lrg 3189648 # jciminsk 10/22/07 - Upgrade support for 11.2. # weiwei 10/17/07 - bug 6476935 add index rsr_i_stamp # jciminsk 10/08/07 - version to 11.2.0.0.0 # molagapp 10/03/07 - bug-6138791 # fsanchez 09/28/07 - bug 6418506 # wfisher 10/12/07 - Adding INCARNATION clause for archivelogs # molagapp 09/13/07 - bug-6395913 # jkrismer 08/10/07 - bug-6336292 fix RMAN-08599 throttle time # banand 08/08/07 - define rout_i_skey index # jciminsk 08/03/07 - version to 11.1.0.7.0 # banand 07/26/07 - bug 6253529 - handle dup brls # molagapp 06/20/07 - fix v$parameter.value to be case insensitive # molagapp 06/18/07 - add checks for forcing open resetlogs repair # banand 06/21/07 - bug 5913497 # molagapp 06/05/07 - bump up version to 11.1.0.6 # raguzman 05/11/07 - restore point fixup and KEEP for proxy archivelogs # banand 06/05/07 - bug 6034995 # molagapp 06/06/07 - add isArchivedLogMissing # banand 05/24/07 - bug 6035495 # molagapp 05/24/07 - lrg 2940143 # raguzman 04/03/07 - tdb_gtsc must set db_file_name_convert # banand 05/15/07 - track remote cf not updated during resync # banand 05/12/07 - allow remote resync if could not update conf/auxnames # swerthei 05/08/07 - fix rc_backup_set.multi_section # banand 05/05/07 - reset db_unique_name for backup set also # banand 05/01/07 - bug-5971763 # molagapp 04/22/07 - bug-6014994 # banand 04/07/07 - bug 5911280 - fix internal error during upgrade # molagapp 04/17/07 - add getIncarnationKey # molagapp 04/06/07 - bug-5845474 # molagapp 04/18/07 - bump up version to 11.1.0.5 # banand 03/26/07 - bug 5885624 - validate db_id for resetDatabase # jkrismer 03/27/07 - 5932181 and 5934290 fix resync for temp # raguzman 03/21/07 - Improved getIncrementalScn # molagapp 04/02/07 - bug 5899994 # molagapp 03/23/07 - resync stopSCN for plugged readonly files # molagapp 03/02/07 - add mandatory to getManualRepair # raguzman 02/19/07 - Add dbms_rcvcat.primaryDFname # swerthei 01/24/07 - fix rc_backup_set.multi_section # molagapp 02/27/07 - rename remote archived log to foreign archived log # banand 02/18/07 - bug-5881248: add getSiteName # molagapp 03/02/07 - fix noarchivelog recovery # molagapp 02/14/07 - bump up version to 11.1.0.4 # banand 02/19/07 - lrg: fix defaults for rsr_osb_allocated # molagapp 02/07/07 - bug-5870927 # banand 01/26/07 - define rc_site view # molagapp 01/07/07 - undoc CHECK READONLY, add SKIP READONLY option # banand 11/28/06 - bug 5620103 # molagapp 01/16/07 - bug 5748964 # raguzman 12/14/06 - BACKUP AS COPY FILE ... 
# raguzman 11/16/06 - active duplicate must copy in parallel # banand 09/17/06 - bug-5647645: resync everything only from cf # molagapp 11/24/06 - add repairParmsRec_t and getRepairParms # molagapp 12/06/06 - bug-5685839 # jkrismer 12/11/06 - bug 5247609 add rule hint to V$RMAN_STATUS query # banand 11/22/06 - partial resync without df names for converted_cf # banand 11/08/06 - bug 5219484 # raguzman 10/25/06 - Optionally specify resuse when adding SRLs # molagapp 10/17/06 - misc IDR fixes # banand 09/10/06 - refix bug 5413943 # molagapp 11/01/06 - bump up version to 11.1.0.3 # raguzman 10/19/06 - network copy password file # swerthei 08/02/06 - fix elapsed time for multi-section # banand 10/06/06 - bug 5549278 # banand 09/10/06 - bug 5441981 # molagapp 10/04/06 - bump up version to 11.1.0.2 # raguzman 09/08/06 - Add SRLs to DUPLICATE FOR STANDBY # molagapp 09/30/06 - close repair script after write # swerthei 09/29/06 - remove WITH GRANT OPTION from public grants # molagapp 09/23/06 - bug-5221399 # fsanchez 08/29/06 - prevent convert from writing to recovery area # molagapp 09/15/06 - bump up version to 11.1.0.1 # sdizdar 07/07/06 - bug-5377122: compression algorithm is controled by # rman # molagapp 08/22/06 - fix misc IDR issues # fsanchez 08/31/06 - bug 5048905 - allow delete of files when db # is not mounted # cdilling 08/07/06 - add 'create role recovery_catalog_owner' # fsanchez 08/14/06 - use single quotes for drop tablespace as tablespace # names are output with double quotes. # raguzman 08/07/06 - Resync time for guaranteed restore points # fsanchez 07/21/06 - bug 4720762 - Return parameter in conversation # status to alert user of time consuming operation # molagapp 07/31/06 - unify RECOVER BLOCK with RECOVER # banand 08/01/06 - bug 5413943 # molagapp 07/13/06 - refix bug-2107554 # molagapp 06/22/06 - Enable multiplex controlfile feasibility check # molagapp 05/12/06 - IDR integration # molagapp 06/26/06 - compatibility change for backporting bug 5106952 # raguzman 06/10/06 - resync normal restore points # raguzman 06/08/06 - Add LIST RESTORE POINT # raguzman 05/17/06 - Add RC_RESTORE_POINT view # raguzman 05/15/06 - New BackupHistory params # raguzman 03/23/06 - use named params with backupSetArchivedLog # banand 06/10/06 - 17844_phase_3: resync catalog from db_unqiue_name all # amjoshi 06/07/06 - Populate node table with new db_unique_names at # resync. # cpedrega 05/16/06 - add resync from primary hooks # molagapp 06/20/06 - lrg_2297829 # molagapp 05/23/06 - improve block corruption project # swerthei 10/07/05 - virtual private catalog # raguzman 04/17/06 - Add spfile support to duplicate # amjoshi 04/12/06 - Support for remote copy. # raguzman 01/20/06 - implement nobackup duplicate # swerthei 01/06/06 - multi-section backups # banand 05/09/06 - 17844_phase_2: spfile/change/resync changes # molagapp 01/23/06 - backup transportable tablespace # banand 04/20/06 - proj 17852 - log management # molagapp 02/09/06 - bug-5106952 # molagapp 04/21/06 - validate header changes # molagapp 04/12/06 - Project IDR # molagapp 12/21/05 - merge catalog project # banand 03/27/06 - 17844_phase_1: database site awareness # fsanchez 01/06/06 - show resync reason # banand 12/27/05 - schema changes to track node specific info # fsanchez 01/06/06 - remove comment about tmp tablespaces # amjoshi 12/16/05 - Bug 4590425: raise error if backupset is not found. 
# molagapp 01/30/06 - bug-4941096 # banand 02/09/06 - bug 4595644 # molagapp 01/16/06 - bump up version to 11.1.0.0 # molagapp 12/13/05 - bug 4754328 # banand 12/05/05 - bug 4755799 # fsanchez 11/17/05 - Keep track of foreign image copies # molagapp 11/15/05 - add restoreCmd_t # molagapp 11/22/05 - bug 4719372 # banand 09/27/05 - bug 4637849 # amjoshi 06/10/05 - bug 4144165: return the number of block corruptions # found by backup validate. # molagapp 10/10/05 - bug-4654856 # molagapp 10/03/05 - update version # ssamaran 01/26/05 - bug-4031420 # molagapp 09/02/05 - add ceilAsm to getSpaceRecl # molagapp 08/24/05 - fix restore failover for incrementals # molagapp 08/18/05 - bug-4548861 # amjoshi 07/25/05 - Bug 4467056: Add failover logic to bscanfdc. # banand 08/15/05 - bug-3657899, don't select corrupted records during # upgrade due to bug 2541360 # molagapp 08/05/05 - bug-4531791 # molagapp 07/23/05 - code cleanup # molagapp 06/07/05 - bug-4404093 # fsanchez 07/06/05 - Remove geterrm, as e use writeErrMsg that takes # care of removing ORA-6512, thus it is unnecesary # fsanchez 07/06/05 - do not echo passwords # molagapp 06/14/05 - bug 4430230 - add flbrp to setUntilScn # banand 06/07/05 - fix rc_database_block_corruption # banand 05/14/05 - encrypted backups # fsanchez 08/10/04 - Optimized backups changes # banand 04/22/05 - bug 4291935 # banand 04/14/05 - bug 4307379 # banand 04/12/05 - bug 4239221 # banand 04/04/05 - bug 4214635 # molagapp 03/02/05 - bug-4146404: add getDbUniqueName # banand 11/23/04 - bug-3877184 # molagapp 03/16/05 - add getLogHistoryLowSCN # molagapp 03/16/05 - retry mining resync stamp when not enough duplicates # banand 02/28/05 - trim error message from all steps # molagapp 02/10/05 - rewrite sql query for performance # molagapp 02/14/05 - remove unnecessary to_date # banand 02/08/05 - bug-4161959 # molagapp 01/26/05 - bug-3959063: dynamically find previous resync time # molagapp 01/12/05 - add instance name to channel allocation message # molagapp 01/18/05 - bug-4110708 # fsanchez 12/28/04 - Break tspitr_11 into two _11 and _11a # fsanchez 12/21/04 - show tag after backuppiece is created # banand 01/05/05 - bug-3966722 # fsanchez 12/21/04 - show tag after backuppiece is created # molagapp 11/04/04 - bug-3818377: add fname to fileRestored # molagapp 11/23/04 - re-fix bug# 3857039 - fix overflow tag, device # molagapp 11/16/04 - lrg-1745516 # molagapp 11/09/04 - bug-3999432 # raguzman 11/12/04 - fix strreqscn to reqscn # molagapp 11/01/04 - bug-3964370 # molagapp 10/08/04 - bug-3936851 - add dest to getFormat # molagapp 09/24/04 - bug-3773849 # fsanchez 08/25/04 - bug 2794801 - add declaration for getDbinc function # molagapp 09/17/04 - bug-3341831 # molagapp 09/22/04 - bug 3857039 # banand 09/14/04 - relate pieces generated by backup backupset to job # fsanchez 09/14/04 - bug 3879374: autobackup format to ASM diskgroup # molagapp 09/02/04 - remove guaranteed_flashback_scn from fb # molagapp 08/18/04 - change default block_size as null # molagapp 08/13/04 - add rlgscn, rlgtime to setUntilScn # molagapp 07/28/04 - bug-3755971 # banand 08/04/04 - bug-3330647 # molagapp 08/01/04 - bug-3498860 # molagapp 07/29/04 - bug-3741999 # banand 07/21/04 - add comments to new canonical functions # banand 07/09/04 - set select_catalog_role priv and change display cols # banand 06/23/04 - bug 3718483 # molagapp 06/29/04 - bug 3664004 # molagapp 05/15/04 - add guaranteed_flashback_scn to fb table # fsanchez 03/26/04 - bug-3114392 # banand 04/22/04 - enhance RMAN job views # htran 
03/12/04 - datapump import/export for clone tablespace # molagapp 05/03/04 - add setUntilResetlogs, change resetDatabase # wyang 05/18/04 - transportable database # molagapp 04/21/04 - tempfile re-creation project # molagapp 04/14/04 - lrg_1636108 # rasivara 04/05/04 - bug 2391697: Add TranslateDatafileCancel # ssamaran 02/11/04 - bug 3417473 - fix delete force obsolete # molagapp 03/23/04 - bug-3527769 # banand 03/23/04 - NT Lrg fix # jeffyu 02/20/04 - bug 3072699 # jeffyu 11/04/03 - bug 3234433 # fsanchez 02/16/04 - bug-3408643 # jeffyu 02/09/04 - real fix for lrg 1597937 # molagapp 02/18/04 - bug-3437911 # molagapp 02/11/04 - bug 3310413 # jeffyu 02/05/04 - backing out lrg 1597937 # jeffyu 01/16/04 - lrg 1597937 # molagapp 11/27/03 - lrg 1596360 # fsanchez 11/11/03 - bug-3216150 # banand 11/11/03 - bug 2665255 # molagapp 10/22/03 - bug-3225832: add stuckMemorySize # banand 10/23/03 - bug 3217172 # molagapp 10/28/03 - add del_log, del_copy templates to reduce program size # molagapp 10/27/03 - bug-3174292: add proxy backup/restore ICDs # banand 10/10/03 - bug 3134939 # molagapp 10/16/03 - bug-3201216 # sjeyakum 09/04/03 - bug 3085334 # banand 10/08/03 - bug-3178592 # sjeyakum 10/02/03 - backout bug 3072699 # swerthei 09/12/03 - add bdf.blocks_read # molagapp 09/24/03 - bug-3154913: do not modify dbms_rcvman constants # molagapp 09/17/03 - init hdl_isdisk while crosschecking backuppiece # rasivara 09/22/03 - bug 2603837: RECOVER CONTINUE for ORA-288 # fsanchez 09/16/03 - bug-3137261 # fsanchez 09/12/03 - bug-2981422 # molagapp 09/07/03 - bug-3130338 # sdizdar 09/09/03 - bug-3115984: add delete object to fix filenames # banand 09/05/03 - bug 3125145 # molagapp 09/04/03 - bug-3126525 # banand 08/28/03 - throttling : delay time handling moved to client # fsanchez 05/26/03 - bug-2675757 # sjeyakum 08/20/03 - bug 3072699 # banand 08/14/03 - bug 2998129 # sdizdar 08/31/03 - remove rsr_keyas unique add index # jeffyu 08/25/03 - fixing resync code for fb table # sdizdar 08/25/03 - add rsr_key as unique constrain for faster query # jeffyu 08/18/03 - modifying smr3 for flashback "to before" # molagapp 08/01/03 - bug-2899010 # nsadaran 03/25/03 - restore scheduling # sdizdar 08/20/03 - bug-3005920 # molagapp 07/08/03 - use node specific config for al deletion policy # nsadaran 08/14/03 - bug-3079386 # fsanchez 05/19/03 - bug-2933117 # sjeyakum 07/25/03 - bug 3029061 # molagapp 07/22/03 - bug 3063882 - move replicate controlfile to server # banand 08/07/03 - autoextend duration endtime for minimize load option # jeffyu 07/21/03 - bug 2976535 # sdizdar 07/19/03 - remove chgWriteFlg # fsanchez 06/20/03 - lrgs_030606 # molagapp 06/12/03 - stop flashback recovery after applying stopseq # rasivara 06/18/03 - bug 3000717: drop tablespace with constraints for # skip # molagapp 06/11/03 - make incarnation resync partial # banand 05/15/03 - multi-node RMAN configuration support # molagapp 05/19/03 - add obsoleteCmd_t,fullBackups,df_incremental_change# # sdizdar 05/18/03 - fix order in backup_files view # molagapp 03/29/03 - use pga_aggregate_target when workarea_size_policy # is AUTO # rasivara 05/28/03 - bug 2968394: Add 3 col index for BP and BS # molagapp 04/12/03 - use NOCOPY for record arguments # molagapp 03/25/03 - add space reclaimable query # molagapp 03/12/03 - add isBsRecCacheMatch, isTranslatedFno # sdizdar 05/01/03 - fix backup_files view # molagapp 04/26/03 - bug-2841084: for recover of copy # fsanchez 01/16/03 - enhanced_scripts # banand 01/28/03 - restore preview # banand 01/23/03 - backup 
duration and throttling # molagapp 05/01/03 - fix default value for compressed # sdizdar 01/22/03 - prj 2090 (compressed backup): # - add compressed flag to bp and al table # - resync compressed flag # - add compressed to budf_start and bual_start # sdizdar 03/26/03 - bug-2867661 # nsadaran 03/11/03 - bug 2828126 # banand 02/27/03 - bug 2707377 # molagapp 02/28/03 - add rc_listBackupPipe # molagapp 02/03/03 - fix 10i package compatibility # sdizdar 02/25/03 - remove rsr from ckptNeeded() # molagapp 02/10/03 - bug 2752465 # fsanchez 02/10/03 - bug-2706829 # banand 02/10/03 - bug 2759308 # molagapp 02/03/03 - fix 10i package compatibility # molagapp 02/07/03 - add identicals to translateDataFileCopy # banand 02/03/03 - list compatibilty fix # molagapp 01/29/03 - remove isrdf column in v_$obsolete_backup_files and # v_$backup_files view # fsanchez 01/24/03 - coverage # banand 01/13/03 - krbbpc re-org # banand 01/08/03 - make 20009 as informational msg # nsadaran 01/06/03 - accessing controlfilecopies by tag and key # fsanchez 12/30/02 - bug-2595303 - default formats in the server # swerthei 01/02/03 - fix usage of sqlerrm # molagapp 12/11/02 - explicit resync refreshes aging rule # molagapp 11/25/02 - standby aging rule # banand 12/09/02 - rename recovery_dest_incarnation # to recovery_target_incarnation # banand 12/09/02 - add resetlogs to getAlBackupHistory # fsanchez 11/26/02 - lrg_1125 # jeffyu 11/04/02 - fixing en_rest_sess # fsanchez 11/22/02 - lrg_1108 # banand 11/20/02 - display 8058 msg only if more controlfiles exists # molagapp 11/19/02 - suppress dbms_backup_restore line no: for warnings # sdizdar 11/18/02 - change error on resetDatabase # molagapp 11/13/02 - fix xcf upgrade # swerthei 10/15/02 - target xtts # sdizdar 11/05/02 - add catalog to switchToCopy() # molagapp 11/06/02 - remove upgcat_116 # banand 10/31/02 - pass resetlogs time instead of stamp to checkLog # fsanchez 10/11/02 - need to make copies of controlfile using # krbbpc, so revert Senad's change # fsanchez 10/08/02 - multiple_file_copy_2 # sdizdar 10/27/02 - change msg 8133 to 8134 # jeffyu 10/02/02 - adding SQL for OPEN RESTRICTED option in DUPLICATE # sdizdar 09/15/02 - duplicate to omf support: add res_cl_setn # sdizdar 09/15/02 - standby cf support: modify raut_search # sdizdar 09/17/02 - add RC_RMAN_STATUS (rsr table) and resync for it # sdizdar 09/30/02 - fix maxDfNumber in listBackup() # molagapp 08/06/02 - recovery area project # fsanchez 09/17/02 - convert_phase2 # banand 08/26/02 - change dbinc_status to varchar2 field # banand 08/16/02 - Recovery thru resetlogs proj: # - get resetlogs stamps for offr and orl # - add doingrcv to translateArchivedLogSCNRange # - support reset database in nocatalog mode # - log_apply use resetlogs from context # mjstewar 09/17/02 - add arch log file name to flashback interface # swerthei 08/09/02 - block change tracking # sdizdar 09/20/02 - add tablespace name to restoreDatafileTo # sdizdar 08/28/02 - controlfile copies are made from snapshot # sdizdar 08/28/02 - search files: modify raut_search # mdilman 09/26/02 - add BIGFILE column to rc_tablespace # molagapp 05/23/02 - add rcvcopy boolean for apply2incr messages # sdizdar 05/22/02 - modify unregisterDatabase # banand 04/02/02 - create datafile project changes # molagapp 12/21/01 - restore failover project # fsanchez 04/24/02 - multiple_file_copy # molagapp 08/20/02 - flashback database support # sdizdar 07/26/02 - update dfRec_t # sdizdar 07/19/02 - init some variables used by listBackup() # sdizdar 06/27/02 - add view 
creation in dbmsrman.sql # sdizdar 01/31/02 - OEM views (part of prj 5779): # - add lbRec_t records and definitons for listBackup() # - add listBackup(), listBackupPipe(), # getRetentionPolicy() # - add rc_backup_files and rc_obsolete_backup_files # molagapp 05/19/02 - bug 2336178 # molagapp 04/23/02 - change backupPieceCreate, backupBackupPiece OMF aware # molagapp 04/16/02 - proxy archived log # molagapp 01/05/02 - catalog backuppiece changes # banand 02/27/02 - resync retries should not release lock on dbinc # banand 02/01/02 - fix 2210440 # sdizdar 02/01/02 - bug-2209822: add auxName in dfRec_t # molagapp 02/27/02 - bug 2174697 # sdizdar 01/29/02 - remove clearChannelInfo from devalloc # sdizdar 01/22/02 - bug-2187669: if allocate fails call clearChannelInfo # molagapp 12/16/01 - bug-2151427 # fsanchez 08/28/01 - bug-1958504 # molagapp 11/29/01 - update package version 9.2.0 # molagapp 11/13/01 - bug 2107554 # fsanchez 10/15/01 - bug-2022597 - Add calls to resDatafileCopy # molagapp 11/13/01 - fix cf_type initialization # molagapp 10/26/01 - add cf_type to beginCkpt # mjaeger 09/18/01 - bug 1819338: bual_del: on excep print error # banand 09/27/01 - unify server and rman autobackups - add backup_cfau # molagapp 10/21/01 - add cfSequence, cfDate to recovery record. # bu_copy, bubp_del changes to copy/delete autobackup # molagapp 05/27/01 - bug 1530744 # swerthei 10/25/01 - fix 1887868 again; qualify x$kccfe query # molagapp 10/06/01 - improve maintenance command performance # sdizdar 10/10/01 - SPFILE backup fix1: init aut_seq # sdizdar 10/02/01 - bug-2026807: autorestore can restore from copies # molagapp 10/08/01 - fix default value for scanned # sdizdar 09/05/01 - SPFILE backup: # - implement resync of V$BACKUP_SPFILE to BSF # - add skeletons for backup, restore, and autorestore # - add high_bsf_recid, BSF, and rc_backup_spfile # molagapp 09/12/01 - print proxy handle during proxy-backup # molagapp 08/30/01 - autolocate enhancements # fsanchez 08/28/01 - fix_lrgs_082201 # fsanchez 07/12/01 - bug-1874815 # swerthei 08/03/01 - bug 1887868, fix ts cursor # sdizdar 07/16/01 - bug-1859288: remove copycurrentcf, modify copycf # banand 07/17/01 - fix 1879251 # banand 07/02/01 - fix 758489 # fsanchez 05/25/01 - dup_exclude # molagapp 06/27/01 - bug 1847636 # fsanchez 06/25/01 - bug-607271: fix also upgcat_7 # fsanchez 06/19/01 - bug-1455226 # molagapp 05/06/01 - fix backup history performance # molagapp 03/07/01 - bug 704727 # swerthei 04/04/01 - add v$datafile_copy.scanned # sdizdar 03/29/01 - bug-1691869: changed deleting of logs in log_apply # banand 04/20/01 - fix 1739560 # fsanchez 04/10/01 - remove_get_put # sdizdar 04/10/01 - bug-1717268: add flags to getBackupHistory # dbeusee 03/19/01 - bug-1627542 # swerthei 04/02/01 - add new dbms_rcvcat.setDatabase calls for debugging # swerthei 03/27/01 - add index on bdf(bs_key) # swerthei 03/05/01 - ckp table cleanup # fsanchez 11/30/00 - bug-1334388 # swerthei 03/28/01 - bug 1709105, fix backup backupset delete input # fsanchez 02/07/01 - bug-1538834 # molagapp 02/09/01 - add debug io tracing # fsanchez 01/26/01 - bug-1587066 # molagapp 02/19/01 - bug 783520 # banand 02/21/01 - Fix 1653661 # molagapp 11/14/00 - add sequence# to krmicd.getLog() # fsanchez 01/25/01 - bug-1586048 # swerthei 02/02/01 - resync retries should be external messages # molagapp 01/22/01 - bug-1584989 # molagapp 01/31/01 - bug-1582073 # molagapp 11/30/00 - bug-1518515 # sdizdar 11/10/00 - bug-1496982: 8.2.0 -> 9.0.0 # sdizdar 11/09/00 - bug-1478539: add keep atributes 
to bsrec_t # sdizdar 10/26/00 - bug-1478503: add next_change# to krmicd.getLog # molagapp 10/24/00 - bug-1478785 # aime 10/27/00 - change 0 to NULL # sdizdar 10/08/00 - bug-1398333: keep in RC views is same as in V$ # molagapp 09/29/00 - bug-1420610 # sdizdar 09/07/00 - keep attributes added to the right views # fsanchez 09/07/00 - autorestore_testing # dbeusee 09/25/00 - bug-1272165 # fsanchez 08/24/00 - cfile-autobackup # molagapp 08/23/00 - add autobackup arg to listTranslateControlfileBackup # sdizdar 09/17/00 - proxy copy fix: # - add fmt in proxy_backup_start # sdizdar 09/11/00 - tablespace resync fix: # - fix ckptNeeded (add recid of tablespace records) # molagapp 08/28/00 - fix 8.2 upgrade # banand 09/13/00 - lrgfix for broken_pieces # gviswana 09/10/00 - Common FE: Rename ROWS. # banand 08/25/00 - change maxpiecesize to number # dbeusee 05/02/00 - rman82_maint_syntax_unification # fsanchez 07/28/00 - bug-1339583 # fsanchez 08/23/00 - fix_copyno_upgrade_82 # dbeusee 07/11/00 - rman82_messages # dbeusee 07/11/00 - rman82_debug_enhancements # fsanchez 08/24/00 - 1343942_82 # molagapp 08/22/00 - remove cl_sql skeleton # fsanchez 07/13/00 - backup_flat_files # sdizdar 08/13/00 - Show all and misc improvement: # - change RECOVERABLE/UNERCOREVABLE to LOGS/NOLOGS # banand 07/20/00 - add backup copies # molagapp 07/22/00 - restore optimization # sdizdar 06/28/00 - Configure auxfilename & exclude tablespace: # - add INCLUDED_IN_DATABASE_BACKUP to TS table and # rc_datafile view # - updated resync procedure to resync datafile # AUX_NAME and tablespace INCLUDED_IN_DATABASE_BACKUP # banand 06/26/00 - handle duplicate archivelogs # molagapp 06/09/00 - duplex formats # molagapp 06/08/00 - restartable backups # molagapp 05/31/00 - use args_start & args_create in backup skeletons # molagapp 05/31/00 - backup optimization # fsanchez 04/28/00 - Controlfile autobackup # sdizdar 05/11/00 - RMAN retention policy (keep): # - add keep in RC tables # - add keep for change & resync # molagapp 06/29/00 - bmr integration # mjstewar 07/24/00 - OMF: new newname ... to new # molagapp 06/23/00 - backuppiece fail-over. # fsanchez 06/26/00 - difs_main_0623 # fsanchez 06/14/00 - bug-1328652 # swerthei 06/08/00 - fix resync bug # fsanchez 05/26/00 - bug-1092966 # banand 05/16/00 - automatic channel allocation: # - add autochn to cleanup # - add set limit channel options and send command # to allocate (devalloc) # swerthei 06/06/00 - add archived log logminer dictionary columns # dbeusee 04/13/00 - rman82_cf_status_unification # dbeusee 11/01/99 - status_mask # molagapp 05/18/00 - block media recovery # fsanchez 05/16/00 - instantiate_standby_main # sdizdar 04/14/00 - RMAN configuration: # - add conf table and rc_rman_configuration view # - modified resync code # molagapp 05/11/00 - bug 1297434: fix config table # molagapp 05/07/00 - backup backupset: add bubs_start, bubp_name, # bu_copy, bubp_del skeletons, add getmaxcopyno # sdizdar 05/08/00 - Merged sdizdar_bug-1171700_main # sdizdar 03/17/00 - bug-1171700: improved delete of archive logs # fsanchez 04/25/00 - bug_1228207_main # sdizdar 02/25/00 - bug-1058508: removed cfileSetSnapshotName from resync # dbeusee 02/25/00 - xcheck_autolocate # swerthei 04/04/00 - add bs.validate # swerthei 03/14/00 - add backup validate # fsanchez 01/28/00 - bug_1040149 # molagapp 02/16/00 - bug 1186598: fix compatibility & no config table # # # Note that dbms_rcvman is implemented as a regular package *and* as # a fixed package. 
The fixed package implementation is provided to allow # the Recovery Manager to function when there is no recovery catalog package. # # Recovery manager must tell dbms_rcvman that it is expected to obtain info # from the controlfile. We cannot assume that we loaded the # fixed package implementation of dbms_rcvman because the target database # may be open. # NOTE!!!!!! # # Whenever a new library member is added or deleted, or an existing member # has an argument added or deleted, then the version number following # the "library" keyword must be changed, and a corresponding change # made to krmk.pc. library '11.2.0.3' define 'x$rman_constant' <<< package rman_constant is &const& end; >>> define 'x$debl' <<< procedure debl(func varchar2, str varchar2, dtype number , level number) is chid varchar2(2000) := krmicd.getChid; begin if (chid is null) then krmicd.writeTrc(func, rman_constant.TRACE_MSG, str, dtype, level); else krmicd.writeTrc(func, rman_constant.TRACE_MSG, 'channel '||chid||': '||str, dtype, level); end if; end; >>> define 'x$deb' <<< procedure deb(func varchar2, str varchar2, dtype number DEFAULT rman_constant.DEBUG_PLSQL, level number DEFAULT rman_constant.LEVEL_DEFAULT) is begin debl(func, str, dtype, level); end; >>> define 'x$beginBackupJobStep' <<< function beginBackupJobStep return boolean is dur_usr_endtime binary_integer; dur_softendtime binary_integer; dur_est_secs binary_integer; partial boolean; backup_cancelled exception; pragma exception_init(backup_cancelled, -19591); begin -- If returned FALSE, job must be aborted if (NOT krmicd.beginJobStep(dur_usr_endtime, dur_softendtime, dur_est_secs, partial)) then if (NOT partial) then raise backup_cancelled; end if; return FALSE; end if; sys.dbms_backup_restore.setLimit(sys.dbms_backup_restore.dur_endtime, dur_usr_endtime); sys.dbms_backup_restore.setLimit(sys.dbms_backup_restore.dur_est_secs, dur_est_secs); sys.dbms_backup_restore.setLimit(sys.dbms_backup_restore.dur_softendtime, dur_softendtime); return TRUE; end; >>> define 'x$endBackupJobStep' <<< function endBackupJobStep (failed IN boolean, errcode IN binary_integer) return boolean is ignore_errs boolean; sleep_secs number; begin ignore_errs := krmicd.endJobStep(failed, errcode); sys.dbms_backup_restore.getlimit (sys.dbms_backup_restore.sleep_secs, sleep_secs); -- display server throttled time for the last backup/copy -- bug 6336292 fix min field of throttle time if (sleep_secs > 0) then krmicd.writeMsg(8599, krmicd.getChid, to_char(floor(sleep_secs/3600)) || ':' || to_char(floor(mod(sleep_secs, 3600)/60), 'FM09') || ':' || to_char(mod(sleep_secs,60), 'FM09')); end if; -- if backup failed and still want to ignore errors, return TRUE if (failed and ignore_errs) then krmicd.clearErrors; return TRUE; end if; -- if backup did not fail, return TRUE if (not failed) then return TRUE; end if; return FALSE; end; >>> define 'x$setBackupParams' <<< procedure setBackupParams(docopy in boolean) is p1 number; p2 number; p3 number; p4 number; t1 varchar2(1025); t2 varchar2(1); t3 varchar2(1); begin if (not docopy and krmicd.getParams(1, p1, p2, p3, p4, t1, t2, t3)) then sys.dbms_backup_restore.setparms(p0=>1, p1=>1, p2=>p1, p3=>p2, p5=>t1); end if; return; end; >>> define 'x$setRestoreParams' <<< procedure setRestoreParams is p1 number; p2 number; p3 number; p4 number; t1 varchar2(1025); t2 varchar2(1); t3 varchar2(1); begin if (krmicd.getParams(2, p1, p2, p3, p4, t1, t2, t3)) then sys.dbms_backup_restore.setparms(p0=>2, p5=>t1); loop exit when not krmicd.getParams(3, p1, p2, p3, p4, 
t1, t2, t3);
      sys.dbms_backup_restore.setparms(p0=>2, p5=>t1);
    end loop;
  end if;
  return;
end;
>>>

define 'v$controlfile_record_section' <<<
define table v$controlfile_record_section ( TYPE VARCHAR2(128), RECORD_SIZE NUMBER, RECORDS_TOTAL NUMBER, RECORDS_USED NUMBER, FIRST_INDEX NUMBER, LAST_INDEX NUMBER, LAST_RECID NUMBER );
>>>

define 'v$controlfile' <<<
define table v$controlfile ( STATUS VARCHAR2(7), NAME VARCHAR2(513), IS_RECOVERY_DEST_FILE VARCHAR2(3), BLOCK_SIZE NUMBER, FILE_SIZE_BLKS NUMBER );
>>>

define 'v$database' <<<
define table v$database ( DBID NUMBER, NAME VARCHAR2(9), CREATED DATE, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, PRIOR_RESETLOGS_CHANGE# NUMBER, PRIOR_RESETLOGS_TIME DATE, LOG_MODE VARCHAR2(12), CHECKPOINT_CHANGE# NUMBER, ARCHIVE_CHANGE# NUMBER, CONTROLFILE_TYPE VARCHAR2(7), CONTROLFILE_CREATED DATE, CONTROLFILE_SEQUENCE# NUMBER, CONTROLFILE_CHANGE# NUMBER, CONTROLFILE_TIME DATE, OPEN_RESETLOGS VARCHAR2(11), VERSION_TIME DATE, RECOVERY_TARGET_INCARNATION# NUMBER, LAST_OPEN_INCARNATION# NUMBER, FLASHBACK_ON VARCHAR2(3), DB_UNIQUE_NAME VARCHAR2(30), DATABASE_ROLE VARCHAR2(16), CONTROLFILE_CONVERTED VARCHAR2(3), PRIMARY_DB_UNIQUE_NAME VARCHAR2(30) );
>>>

define 'v$tablespace' <<<
define table v$tablespace ( TS# NUMBER, NAME VARCHAR2(30), INCLUDED_IN_DATABASE_BACKUP VARCHAR2(3), BIGFILE VARCHAR2(3), ENCRYPT_IN_BACKUP VARCHAR2(3) );
>>>

# NOTE!! NOTE!! NOTE!!
#-
#- Per bug-8239313 it is not recommended to use the v$datafile view when the
#- database isn't open, because the view joins x$kcvfh and has to identify
#- the file.  When the database has 4000 files, it takes almost 2 minutes
#- for v$datafile to respond.  So, if you need just the name of a datafile,
#- query x$kccfn; if you need datafile information, query x$kccfe.
#-
define 'v$datafile' <<<
define table v$datafile ( BLABLA NUMBER );
>>>
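#- For illustration only (a hedged sketch, not one of the loaded libunits):
#- the access pattern the note above recommends.  The columns come from the
#- x$kccfn and x$kccfe defines later in this file; the exact filter
#- predicates are assumptions here, not rules taken from kcc.h:
#-
#-   -- datafile names only: served from the controlfile, no header reads
#-   select fn.fnfno, fn.fnnam from x$kccfn fn where fn.fnnam is not null;
#-
#-   -- datafile attributes (size, status, tablespace#), again without
#-   -- touching the datafile headers the way x$kcvfh does
#-   select fe.fenum, fe.fecsz, fe.festa, fe.fetsn from x$kccfe fe;
#-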
define 'v$tempfile' <<<
define table v$tempfile ( FILE# NUMBER, CREATION_CHANGE# NUMBER, NAME VARCHAR2(513) );
>>>

define 'x$kccrt' <<<
define table x$kccrt ( INST_ID NUMBER, RTNUM NUMBER, RTSTA NUMBER, RTNLF NUMBER, RTSEQ NUMBER, RTENB VARCHAR2(16), RTETS VARCHAR2(20), RTDIS VARCHAR2(16), RTDIT VARCHAR2(20) );
>>>

define 'x$kcctir' <<<
define table x$kcctir ( INST_ID NUMBER, TIRNUM NUMBER );
>>>

define 'v$log' <<<
define table v$log ( GROUP# NUMBER, THREAD# NUMBER, SEQUENCE# NUMBER, BYTES NUMBER, MEMBERS NUMBER, ARCHIVED VARCHAR2(3), STATUS VARCHAR2(16), FIRST_CHANGE# NUMBER, FIRST_TIME DATE );
>>>

define 'v$standby_log' <<<
define table v$standby_log ( GROUP# NUMBER, THREAD# NUMBER, SEQUENCE# NUMBER, BYTES NUMBER, MEMBERS NUMBER, ARCHIVED VARCHAR2(3), STATUS VARCHAR2(16), FIRST_CHANGE# NUMBER, FIRST_TIME DATE );
>>>

define 'v$logfile' <<<
define table v$logfile ( GROUP# NUMBER, STATUS VARCHAR2(7), MEMBER VARCHAR2(513) );
>>>

define 'v$rman_configuration' <<<
DEFINE TABLE v$rman_configuration ( CONF# NUMBER, NAME VARCHAR2(65), VALUE VARCHAR2(1025) );
>>>

define 'v$log_history' <<<
define table v$log_history ( RECID NUMBER, STAMP NUMBER, THREAD# NUMBER, SEQUENCE# NUMBER, FIRST_CHANGE# NUMBER, FIRST_TIME DATE, NEXT_CHANGE# NUMBER, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE );
>>>

define 'v$archived_log' <<<
define table v$archived_log ( RECID NUMBER, STAMP NUMBER, NAME VARCHAR2(513), DEST_ID NUMBER, THREAD# NUMBER, SEQUENCE# NUMBER, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, FIRST_CHANGE# NUMBER, FIRST_TIME DATE, NEXT_CHANGE# NUMBER, NEXT_TIME DATE, BLOCKS NUMBER, BLOCK_SIZE NUMBER, CREATOR VARCHAR2(7), REGISTRAR VARCHAR2(7), STANDBY_DEST VARCHAR2(3), ARCHIVED VARCHAR2(3), APPLIED VARCHAR2(3), DELETED VARCHAR2(3), STATUS VARCHAR2(1), COMPLETION_TIME DATE, DICTIONARY_BEGIN VARCHAR2(3), DICTIONARY_END VARCHAR2(3), IS_RECOVERY_DEST_FILE VARCHAR2(3), COMPRESSED VARCHAR2(3), END_OF_REDO_TYPE VARCHAR2(10) );
>>>

define 'v$recovery_status' <<<
define table v$recovery_status ( RECOVERY_CHECKPOINT DATE, THREAD NUMBER, SEQUENCE_NEEDED NUMBER, SCN_NEEDED VARCHAR2(16), TIME_NEEDED DATE, PREVIOUS_LOG_NAME VARCHAR2(513), PREVIOUS_LOG_STATUS VARCHAR2(13), REASON VARCHAR2(13) );
>>>

define 'v$offline_range' <<<
define table v$offline_range ( RECID NUMBER, STAMP NUMBER, FILE# NUMBER, OFFLINE_CHANGE# NUMBER, ONLINE_CHANGE# NUMBER, ONLINE_TIME DATE, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE );
>>>

define 'v$backup_set' <<<
define table v$backup_set ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, BACKUP_TYPE VARCHAR2(1), CONTROLFILE_INCLUDED VARCHAR2(3), INCREMENTAL_LEVEL NUMBER, PIECES NUMBER, START_TIME DATE, COMPLETION_TIME DATE, ELAPSED_SECONDS NUMBER, BLOCK_SIZE NUMBER, STATUS VARCHAR2(1), INPUT_FILE_SCAN_ONLY VARCHAR2(3), KEEP VARCHAR2(3), KEEP_UNTIL DATE, KEEP_OPTIONS VARCHAR2(13), MULTI_SECTION VARCHAR2(3) );
>>>

define 'v$backup_piece' <<<
define table v$backup_piece ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, PIECE# NUMBER, COPY# NUMBER, DEVICE_TYPE VARCHAR2(17), HANDLE VARCHAR2(513), COMMENTS VARCHAR2(81), MEDIA VARCHAR2(65), MEDIA_POOL NUMBER, CONCUR VARCHAR2(3), TAG VARCHAR2(32), STATUS VARCHAR2(1), START_TIME DATE, COMPLETION_TIME DATE, ELAPSED_SECONDS NUMBER, DELETED VARCHAR2(3), BYTES NUMBER, IS_RECOVERY_DEST_FILE VARCHAR2(3), RMAN_STATUS_RECID NUMBER, RMAN_STATUS_STAMP NUMBER, COMPRESSED VARCHAR2(3), ENCRYPTED VARCHAR2(3), BACKED_BY_OSB VARCHAR2(3) );
>>>

define 'v$backup_datafile' <<<
define table v$backup_datafile ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, FILE# NUMBER, CREATION_CHANGE# NUMBER, CREATION_TIME DATE, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, INCREMENTAL_LEVEL NUMBER, INCREMENTAL_CHANGE# NUMBER, CHECKPOINT_CHANGE# NUMBER, CHECKPOINT_TIME DATE, ABSOLUTE_FUZZY_CHANGE# NUMBER, MARKED_CORRUPT NUMBER, MEDIA_CORRUPT NUMBER, LOGICALLY_CORRUPT NUMBER, DATAFILE_BLOCKS NUMBER, BLOCKS NUMBER, BLOCK_SIZE NUMBER, OLDEST_OFFLINE_RANGE NUMBER, COMPLETION_TIME DATE, CONTROLFILE_TYPE VARCHAR2(1), USED_CHANGE_TRACKING VARCHAR2(3), BLOCKS_READ NUMBER, USED_OPTIMIZATION VARCHAR2(3), FOREIGN_DBID NUMBER, PLUGGED_READONLY VARCHAR2(3), PLUGIN_CHANGE# NUMBER, PLUGIN_RESETLOGS_CHANGE# NUMBER, PLUGIN_RESETLOGS_TIME DATE, SECTION_SIZE NUMBER );
>>>

define 'v$backup_corruption' <<<
define table v$backup_corruption ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, PIECE# NUMBER, FILE# NUMBER, BLOCK# NUMBER, BLOCKS NUMBER, CORRUPTION_CHANGE# NUMBER, MARKED_CORRUPT VARCHAR2(3), CORRUPTION_TYPE VARCHAR2(9) );
>>>

define 'v$backup_redolog' <<<
define table v$backup_redolog ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, THREAD# NUMBER, SEQUENCE# NUMBER, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, FIRST_CHANGE# NUMBER, FIRST_TIME DATE, NEXT_CHANGE# NUMBER, NEXT_TIME DATE, BLOCKS NUMBER, BLOCK_SIZE NUMBER, TERMINAL VARCHAR2(3) );
>>>

define 'v$datafile_copy' <<<
define table v$datafile_copy ( RECID NUMBER, STAMP NUMBER, NAME VARCHAR2(513), TAG VARCHAR2(32), FILE# NUMBER, RFILE# NUMBER, CREATION_CHANGE# NUMBER, CREATION_TIME DATE, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, INCREMENTAL_LEVEL NUMBER, CHECKPOINT_CHANGE# NUMBER,
CHECKPOINT_TIME DATE, ABSOLUTE_FUZZY_CHANGE# NUMBER, RECOVERY_FUZZY_CHANGE# NUMBER, RECOVERY_FUZZY_TIME DATE, ONLINE_FUZZY VARCHAR2(3), BACKUP_FUZZY VARCHAR2(3), MARKED_CORRUPT NUMBER, MEDIA_CORRUPT NUMBER, LOGICALLY_CORRUPT NUMBER, BLOCKS NUMBER, BLOCK_SIZE NUMBER, OLDEST_OFFLINE_RANGE NUMBER, DELETED VARCHAR2(3), STATUS VARCHAR2(1), COMPLETION_TIME DATE, CONTROLFILE_TYPE VARCHAR2(1), KEEP VARCHAR2(3), KEEP_UNTIL DATE, KEEP_OPTIONS VARCHAR2(13), SCANNED VARCHAR2(3), IS_RECOVERY_DEST_FILE VARCHAR2(3), RMAN_STATUS_RECID NUMBER, RMAN_STATUS_STAMP NUMBER, FOREIGN_DBID NUMBER, PLUGGED_READONLY VARCHAR2(3), PLUGIN_CHANGE# NUMBER, PLUGIN_RESETLOGS_CHANGE# NUMBER, PLUGIN_RESETLOGS_TIME DATE ); >>> define 'v$copy_corruption' <<< define table v$copy_corruption ( RECID NUMBER, STAMP NUMBER, COPY_RECID NUMBER, COPY_STAMP NUMBER, FILE# NUMBER, BLOCK# NUMBER, BLOCKS NUMBER, CORRUPTION_CHANGE# NUMBER, MARKED_CORRUPT VARCHAR2(3), CORRUPTION_TYPE VARCHAR2(9) ); >>> define 'x$kccblkcor' <<< define table x$kccblkcor ( BLKRID NUMBER, BLKSTM NUMBER, BLKTYPE NUMBER, BLKFNO NUMBER, BLKCRS NUMBER, BLKCRT DATE, BLKTOT NUMBER, BLKSBLK NUMBER, BLKSCN NUMBER ); >>> define 'v$deleted_object' <<< define table v$deleted_object ( RECID NUMBER, STAMP NUMBER, TYPE VARCHAR2(30), OBJECT_RECID NUMBER, OBJECT_STAMP NUMBER, OBJECT_DATA NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER ); >>> define 'v$backup_spfile' <<< define table v$backup_spfile ( RECID NUMBER, STAMP NUMBER, SET_STAMP NUMBER, SET_COUNT NUMBER, MODIFICATION_TIME DATE, BYTES NUMBER, COMPLETION_TIME DATE, DB_UNIQUE_NAME VARCHAR2(30) ); >>> define 'x$kccdi' <<< define table x$kccdi ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, DIDFV NUMBER, DIDFC NUMBER, DICTS VARCHAR2(20), DIDBN VARCHAR2(9), DIRDB NUMBER, DICCT VARCHAR2(20), DIFLG NUMBER, DIIRS VARCHAR2(16), DIRLS VARCHAR2(16), DIRLC VARCHAR2(20), DIRLC_I NUMBER, DIPRS VARCHAR2(16), DIPRC VARCHAR2(20), DIPRC_I NUMBER, DIRDV NUMBER, DIRDC NUMBER, DINDF NUMBER, DINOF NUMBER, DICPT NUMBER, DISCN VARCHAR2(16), DINET NUMBER, DINOT NUMBER, DIOTH NUMBER, DIOTT NUMBER, DIETB RAW(8), DIMLM NUMBER, DIMDM NUMBER, DIARH NUMBER, DIART NUMBER, DIFAS VARCHAR2(16), DICKP_SCN VARCHAR2(16), DICKP_TIM VARCHAR2(20), DICSQ NUMBER, DIDBI NUMBER, DISSC_SCN VARCHAR2(16), DISSC_TIM VARCHAR2(20), DISFP NUMBER, DIBSC NUMBER, DIPOFB NUMBER, DIPNFB NUMBER, DICOFB NUMBER, DICNFB NUMBER, DIFL2 NUMBER ); >>> define 'x$kccdi2' <<< define table x$kccdi2 ( DI2IRT VARCHAR2(20), DI2FBRET NUMBER ); >>> define 'x$kccfe' <<< define table x$kccfe ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, FENUM NUMBER, FECSZ NUMBER, FEBSZ NUMBER, FESTA NUMBER, FECRC_SCN VARCHAR2(16), FECRC_TIM VARCHAR2(20), FECRC_THR NUMBER, FECRC_RBA_SEQ NUMBER, FECRC_RBA_BNO NUMBER, FECRC_RBA_BOF NUMBER, FECRC_ETB RAW(8), FECPS VARCHAR2(16), FECPT VARCHAR2(20), FECPC NUMBER, FESTS VARCHAR2(16), FESTT VARCHAR2(20), FEBSC VARCHAR2(16), FEFNH NUMBER, FEFNT NUMBER, FEDUP NUMBER, FEURS VARCHAR2(16), FEURT VARCHAR2(20), FEOFS VARCHAR2(16), FEONC_SCN VARCHAR2(16), FEONC_TIM VARCHAR2(20), FEONC_THR NUMBER, FEONC_RBA_SEQ NUMBER, FEONC_RBA_BNO NUMBER, FEONC_RBA_BOF NUMBER, FEONC_ETB RAW(8), FEPOR NUMBER, FETSN NUMBER, FETSI NUMBER, FERFN NUMBER, FEPFT NUMBER, FEDOR NUMBER, FEPDI NUMBER, FEFDB NUMBER, FEPLG_SCN VARCHAR2(16), FEPAX NUMBER, FEFLG NUMBER, FEPLUS NUMBER, FEPRLS NUMBER, FEPRLT DATE, FEFCRS NUMBER, FEFCRT DATE, FEFCPS NUMBER, FEFCPT DATE ); >>> define 'x$kcctf' <<< define table x$kcctf ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, TFNUM NUMBER, TFAFN NUMBER, TFCSZ NUMBER, TFBSZ NUMBER, TFSTA 
NUMBER, TFCRC_SCN VARCHAR2(16), TFCRC_TIM VARCHAR2(20), TFFNH NUMBER, TFFNT NUMBER, TFDUP NUMBER, TFTSN NUMBER, TFTSI NUMBER, TFRFN NUMBER, TFPFT NUMBER, TFMSZ NUMBER, TFNSZ NUMBER ); >>> define 'x$kccfle' <<< define table x$kccfle ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, FLELTIM VARCHAR2(20) ); >>> define 'x$kccfn' <<< define table x$kccfn ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, FNNUM NUMBER, FNTYP NUMBER, FNFNO NUMBER, FNFWD NUMBER, FNBWD NUMBER, FNFLG NUMBER, FNNAM VARCHAR2(513), FNONM VARCHAR2(513), FNUNN NUMBER ); >>> define 'v$mystat' <<< define table v$mystat ( SID NUMBER, STATISTIC# NUMBER, VALUE NUMBER ); >>> define 'v$instance' <<< define table v$instance ( INSTANCE_NUMBER NUMBER, INSTANCE_NAME VARCHAR2(16), HOST_NAME VARCHAR2(64), VERSION VARCHAR2(17), STARTUP_TIME DATE, STATUS VARCHAR2(7), PARALLEL VARCHAR2(3), THREAD# NUMBER, ARCHIVER VARCHAR2(7), LOG_SWITCH_WAIT VARCHAR2(11), LOGINS VARCHAR2(10), SHUTDOWN_PENDING VARCHAR2(3) ); >>> define 'gv$instance' <<< define table gv$instance ( INST_ID NUMBER, INSTANCE_NUMBER NUMBER, INSTANCE_NAME VARCHAR2(16), HOST_NAME VARCHAR2(64), VERSION VARCHAR2(17), STARTUP_TIME DATE, STATUS VARCHAR2(7), PARALLEL VARCHAR2(3), THREAD# NUMBER, ARCHIVER VARCHAR2(7), LOG_SWITCH_WAIT VARCHAR2(11), LOGINS VARCHAR2(10), SHUTDOWN_PENDING VARCHAR2(3) ); >>> define 'v$parameter' <<< define table v$parameter ( NUM NUMBER, NAME VARCHAR2(64), TYPE NUMBER, VALUE VARCHAR2(512), ISDEFAULT VARCHAR2(9), ISSES_MODIFIABLE VARCHAR2(5), ISSYS_MODIFIABLE VARCHAR2(9), ISMODIFIED VARCHAR2(10), ISADJUSTED VARCHAR2(5), DESCRIPTION VARCHAR2(64) ); >>> define 'v$parameter2' <<< define table v$parameter2 ( NUM NUMBER, NAME VARCHAR2(80), TYPE NUMBER, VALUE VARCHAR2(512), DISPLAY_VALUE VARCHAR2(512), ISDEFAULT VARCHAR2(6), ISSES_MODIFIABLE VARCHAR2(5), ISSYS_MODIFIABLE VARCHAR2(9), ISINSTANCE_MODIFIABLE VARCHAR2(5), ISMODIFIED VARCHAR2(10), ISADJUSTED VARCHAR2(5), ISDEPRECATED VARCHAR2(5), DESCRIPTION VARCHAR2(255), ORDINAL NUMBER, UPDATE_COMMENT VARCHAR2(255) ); >>> define 'v$memory_dynamic_components' <<< define table v$memory_dynamic_components ( COMPONENT VARCHAR2(64), CURRENT_SIZE NUMBER, MIN_SIZE NUMBER, MAX_SIZE NUMBER, USER_SPECIFIED_SIZE NUMBER, OPER_COUNT NUMBER, LAST_OPER_TYPE VARCHAR2(13), LAST_OPER_MODE VARCHAR2(9), LAST_OPER_TIME DATE, GRANULE_SIZE NUMBER ); >>> define 'x$kcrmx' <<< define table x$kcrmx ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, BCF NUMBER, CBR NUMBER, TBR NUMBER, SBR NUMBER, IRS VARCHAR2(16), INS VARCHAR2(16), STM VARCHAR2(20), CKPSCN VARCHAR2(16), CKPTIM VARCHAR2(20), CKPETB RAW(8), EOK NUMBER, NAM VARCHAR2(513), THR NUMBER, SCN VARCHAR2(16), TIM VARCHAR2(20), SEQ NUMBER, LOS VARCHAR2(16), FAM NUMBER, UIS NUMBER, ORT VARCHAR2(20), NRT VARCHAR2(20), FLG NUMBER, MRS NUMBER, NTX NUMBER, CTC NUMBER, RLS NUMBER, RLC NUMBER ); >>> # the member name must begin with x$ in order to load it automatically define 'x$undo$' <<< define table undo$ ( US# NUMBER, NAME VARCHAR2(30), USER# NUMBER, FILE# NUMBER, BLOCK# NUMBER, SCNBAS NUMBER, SCNWRP NUMBER, XACTSQN NUMBER, UNDOSQN NUMBER, INST# NUMBER, STATUS$ NUMBER, TS# NUMBER, UGRP# NUMBER, KEEP NUMBER, OPTIMAL NUMBER, FLAGS NUMBER, SPARE1 NUMBER, SPARE2 NUMBER, SPARE3 NUMBER, SPARE4 VARCHAR2(1000), SPARE5 VARCHAR2(1000), SPARE6 DATE ); >>> define 'v$database_incarnation' <<< define table v$database_incarnation ( INCARNATION# NUMBER, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, PRIOR_RESETLOGS_CHANGE# NUMBER, PRIOR_RESETLOGS_TIME DATE, STATUS VARCHAR2(7), RESETLOGS_ID NUMBER, PRIOR_INCARNATION# NUMBER 
);
>>>

define 'v$rman_status' <<<
define table v$rman_status ( SID NUMBER, RECID NUMBER, STAMP NUMBER, PARENT_RECID NUMBER, PARENT_STAMP NUMBER, SESSION_RECID NUMBER, SESSION_STAMP NUMBER, ROW_LEVEL NUMBER, ROW_TYPE VARCHAR2(20), COMMAND_ID VARCHAR2(33), OPERATION VARCHAR2(33), STATUS VARCHAR2(23), MBYTES_PROCESSED NUMBER, START_TIME DATE, END_TIME DATE, INPUT_BYTES NUMBER, OUTPUT_BYTES NUMBER, OPTIMIZED VARCHAR2(3), OBJECT_TYPE VARCHAR2(80), OUTPUT_DEVICE_TYPE VARCHAR2(17), OSB_ALLOCATED VARCHAR2(3) );
>>>

define 'v$rman_output' <<<
define table v$rman_output ( SID NUMBER, RECID NUMBER, STAMP NUMBER, RMAN_STATUS_RECID NUMBER, RMAN_STATUS_STAMP NUMBER, SESSION_RECID NUMBER, SESSION_STAMP NUMBER, OUTPUT VARCHAR2(129) );
>>>

define 'x$kccrdi' <<<
define table x$kccrdi ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, LOCATION VARCHAR2(513), SLIMIT NUMBER, SUSED NUMBER, SDATAFILE NUMBER, FCNT NUMBER, SRECL NUMBER, SYSAVAIL NUMBER, OMRTIME DATE, FLAGS NUMBER );
>>>

define 'x$kccrsp' <<<
define table x$kccrsp ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, RSPNAME VARCHAR2(128), RSPINCARN NUMBER, RSPSCN VARCHAR2(16), RSPTIME VARCHAR2(20), RSPRSPTIME VARCHAR2(20), RSPLGSZ VARCHAR2(23), RSPFLAGS NUMBER, RSPFSCN VARCHAR2(16) );
>>>

define 'x$kccnrs' <<<
define table x$kccnrs ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, NRSNAME VARCHAR2(128), NRSINCARN NUMBER, NRSSCN VARCHAR2(16), NRSTIME VARCHAR2(20), NRSFLAGS NUMBER, NRSRSPTIME VARCHAR2(20), NRSRID NUMBER, NRSSTM NUMBER );
>>>

#
# If "keep_for_rcv" is TRUE, this function considers for deletion only those
# archived logs that have been restored and are older than the recovery
# checkpoint SCN.  Otherwise, it considers all restored archived logs.
#
# If "del_all" is TRUE, this function deletes every archived log selected by
# the "keep_for_rcv" rule.  Otherwise, it deletes only the archived logs that
# were restored into the recovery area (OMF).
#
# The logic in this libmem must be kept in sync with the log deletion logic
# in krmklstc().
#
define 'x$delete_logs' <<<
procedure delete_logs(keep_for_rcv IN boolean, del_all IN boolean) IS
  alfrec          v$archived_log%ROWTYPE;
  scn_needed_num  number;
begin
  -- Get the SCN needed by the recovery process.
  if (keep_for_rcv) then
    select to_number(scn_needed) into scn_needed_num from v$recovery_status;
  else
    scn_needed_num := 9e125;
  end if;

  -- Scan loglist_krmkc and delete the logs that are not needed by the
  -- recovery process.
  loop
<<getnext>>
    -- Call krmkglog to get a restored log whose next_change# is less than
    -- scn_needed_num.  krmkglog also deletes the log from loglist_krmkc, so
    -- that the next call does not return the same log.
    alfrec.name := krmicd.getLog(scn_needed_num, alfrec.thread#,
                                 alfrec.sequence#, alfrec.recid, alfrec.stamp,
                                 alfrec.resetlogs_change#,
                                 alfrec.first_change#, alfrec.next_change#,
                                 alfrec.block_size,
                                 alfrec.is_recovery_dest_file);
    exit when (alfrec.name is NULL);
    if (not del_all and alfrec.is_recovery_dest_file != 'YES') then
      goto getnext;
    end if;
    begin
      sys.dbms_backup_restore.deleteArchivedLog(
          recid            => alfrec.recid,
          stamp            => alfrec.stamp,
          fname            => alfrec.name,
          thread           => alfrec.thread#,
          sequence         => alfrec.sequence#,
          resetlogs_change => alfrec.resetlogs_change#,
          first_change     => alfrec.first_change#,
          blksize          => alfrec.block_size);
    exception
      when others then
        krmicd.writeMsg(8510, to_char(alfrec.thread#),
                        to_char(alfrec.sequence#));
        raise;
    end;
    krmicd.writeMsg(8071, krmicd.getChid);
    krmicd.writeMsg(8514, alfrec.name, to_char(alfrec.recid),
                    to_char(alfrec.stamp));
  end loop;
end;
>>>

define 'x$name_log' <<<
procedure name_log(memnum       IN number
                  ,arch_recid   IN number
                  ,arch_stamp   IN number
                  ,thread       IN number
                  ,sequence     IN number
                  ,fname        IN varchar2
                  ,blocks       IN number
                  ,blksize      IN number
                  ,files        IN OUT binary_integer
                  ,first_time   IN OUT boolean
                  ,docopies     IN boolean
                  ,validatecmd  IN boolean) IS
  duplicate      boolean;
  in_use         exception;
  del_for_space  exception;
  pragma exception_init(in_use, -19584);
  pragma exception_init(del_for_space, -19805);
begin
  sys.dbms_backup_restore.backupArchivedLog(arch_recid => arch_recid,
                                            arch_stamp => arch_stamp,
                                            duplicate  => duplicate);
  if first_time then
    if validatecmd then
      krmicd.writeMsg(8146, krmicd.getChid);
    elsif not docopies then
      krmicd.writeMsg(8014, krmicd.getChid);
    end if;
    first_time := FALSE;
  end if;
  if not duplicate then
    files := files + 1;
    krmicd.writeMsg(8504, to_char(thread), to_char(sequence),
                    to_char(arch_recid), to_char(arch_stamp));
  end if;
  deb('name_log', 'blocks=' || blocks || ' block_size=' || blksize,
      rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN);
exception
  when in_use then
    krmicd.writeMsg(8603, fname);
    krmicd.clearErrors;
  when del_for_space then
    krmicd.writeMsg(8604, fname);
    krmicd.clearErrors;
end;
>>>

define 'x$name_datafilecopy' <<<
procedure name_datafilecopy(memnum        IN number
                           ,copy_recid    IN number
                           ,copy_stamp    IN number
                           ,fname         IN varchar2
                           ,dfnumber      IN number
                           ,blocks        IN number
                           ,blksize       IN number
                           ,tsname        IN OUT varchar2
                           ,files         IN OUT binary_integer
                           ,docopies      IN boolean
                           ,max_corrupt   IN number default 0
                           ,since_change  IN number default 0) IS
  in_use         exception;
  del_for_space  exception;
  pragma exception_init(in_use, -19584);
  pragma exception_init(del_for_space, -19805);
begin
  if files < memnum then
    sys.dbms_backup_restore.backupDataFileCopy(
        copy_recid   => copy_recid,
        copy_stamp   => copy_stamp,
        max_corrupt  => max_corrupt,
        since_change => since_change);
    if docopies and dfnumber != 0 then
      tsname := sys.dbms_backup_restore.getTsNameFromDataFileCopy(fname,
                                                                  dfnumber);
    end if;
    files := files + 1;
    if not docopies then
      krmicd.writeMsg(8033, krmicd.getChid, to_char(dfnumber, 'FM09999'));
      krmicd.writeMsg(8506, fname);
    else
      krmicd.writeMsg(8587, to_char(dfnumber, 'FM09999'), fname);
    end if;
    deb('budc_name', 'blocks=' || blocks || ' block_size=' || blksize,
        rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN);
  end if;
exception
  when in_use then
    krmicd.writeMsg(8603, fname);
    krmicd.clearErrors;
  when del_for_space then
    krmicd.writeMsg(8604, fname);
    krmicd.clearErrors;
end;
>>>
#
# Get all successfully restored files in the current restore conversation.
# The successfully restored files are noted down because they do not require
# another restore when this is called recursively during restore failover.
# The call also fetches back the actual restored filenames, which is useful
# when files are restored to an OMF destination.
#
define 'x$getFileRestored' <<<
procedure getFileRestored(proxy IN boolean) IS
  firstcall   boolean := TRUE;
  ftype       binary_integer;
  thread      binary_integer;
  sequence    number;
  resetscn    number;
  resetstamp  number;
  fno         binary_integer;
  callagain   binary_integer;
  fname       varchar2(1024);
begin
  loop
    callagain := sys.dbms_backup_restore.fetchFileRestored(
                     firstcall  => firstcall,
                     proxy      => proxy,
                     ftype      => ftype,
                     fno        => fno,
                     thread     => thread,
                     sequence   => sequence,
                     resetscn   => resetscn,
                     resetstamp => resetstamp,
                     fname      => fname);
    exit when callagain = 0;
    firstcall := FALSE;
    krmicd.fileRestored(ftype => ftype, fno => fno, thread => thread,
                        sequence => sequence, resetscn => resetscn,
                        resetstamp => resetstamp, fname => fname);
  end loop;
end;
>>>

define 'x$stamp2date' <<<
function stamp2date(stamp IN number) return date IS
  x   number;
  dt  varchar2(19);
begin
  x := stamp;
  dt := to_char(mod(x,60), 'FM09');                    -- seconds
  x := floor(x/60);
  dt := to_char(mod(x,60), 'FM09') || ':' || dt;       -- minutes
  x := floor(x/60);
  dt := to_char(mod(x,24), 'FM09') || ':' || dt;       -- hours
  x := floor(x/24);
  dt := to_char(mod(x,31)+1, 'FM09') || ' ' || dt;     -- days
  x := floor(x/31);
  dt := to_char(mod(x,12)+1, 'FM09') || '/' || dt;     -- months
  dt := to_char(floor(x/12)+1988) || '/' || dt;
  return to_date(dt, 'YYYY/MM/DD HH24:MI:SS');
end;
>>>

# refer to kcmbts in kcm.h to see where this code came from
# this function is the inverse of the stamp2date function.
define 'x$date2stamp' <<<
function date2stamp(dt IN date) return number is
  stamp number;
begin
  stamp := (((((to_number(to_char(dt, 'YYYY'))-1988)*12 +
               (to_number(to_char(dt, 'MM'))-1))*31 +
              (to_number(to_char(dt, 'DD'))-1))*24 +
             (to_number(to_char(dt, 'HH24'))))*60 +
            (to_number(to_char(dt, 'MI'))))*60 +
           (to_number(to_char(dt, 'SS')));
  return stamp;
end;
>>>
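#
# For illustration only, a worked example of the encoding above:
# date2stamp(to_date('1988/01/01 00:00:00', 'YYYY/MM/DD HH24:MI:SS')) = 0,
# and every month adds 31*24*60*60 = 2678400 regardless of its real length,
# so a stamp is an order-preserving encoding rather than a true count of
# elapsed seconds.  stamp2date inverts it exactly:
# stamp2date(date2stamp(d)) = d for any date d on or after 1988/01/01.
#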
define 'x$dur2time' <<<
procedure dur2time(dur IN OUT number, hours IN OUT number, mins IN OUT number, secs IN OUT number) is
begin
  hours := floor(dur*24);
  dur := dur - hours/24;
  mins := floor(dur*24*60);
  dur := dur - mins/(24*60);
  secs := dur*24*60*60;
end;
>>>

define 'x$entered' <<<
procedure entered(func varchar2) is
begin
  krmicd.writeTrc(func, rman_constant.TRACE_ENTER, '', rman_constant.DEBUG_PLSQL, 1);
end;
>>>

define 'x$exited' <<<
procedure exited(func varchar2, str varchar2) is
begin
  krmicd.writeTrc(func, rman_constant.TRACE_EXIT, str, rman_constant.DEBUG_PLSQL, 1);
end;
>>>

define 'x$bool2char' <<<
function bool2char(b boolean) return varchar2 is
begin
  if (b) then
    return 'TRUE';
  else
    return 'FALSE';
  end if;
end;
>>>

define 'x$kcvfh' <<<
DEFINE TABLE x$kcvfh ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, HXFIL NUMBER, HXONS NUMBER, HXSTS VARCHAR2(16), HXERR NUMBER, HXVER NUMBER, FHSWV NUMBER, FHCVN NUMBER, FHDBI NUMBER, FHDBN VARCHAR2(9), FHCSQ NUMBER, FHFSZ NUMBER, FHBSZ NUMBER, FHFNO NUMBER, FHTYP NUMBER, FHRDB NUMBER, FHCRS VARCHAR2(16), FHCRT VARCHAR2(20), FHRLC VARCHAR2(20), FHRLC_I NUMBER, FHRLS VARCHAR2(16), FHPRC VARCHAR2(20), FHPRC_I NUMBER, FHPRS VARCHAR2(16), FHBTI VARCHAR2(20), FHBSC VARCHAR2(16), FHBTH NUMBER, FHSTA NUMBER, FHSCN VARCHAR2(16), FHTIM VARCHAR2(20), FHTHR NUMBER, FHRBA_SEQ NUMBER, FHRBA_BNO NUMBER, FHRBA_BOF NUMBER, FHETB RAW(8), FHCPC NUMBER, FHRTS VARCHAR2(20), FHCCC NUMBER, FHBCP_SCN VARCHAR2(16), FHBCP_TIM VARCHAR2(20), FHBCP_THR NUMBER, FHBCP_RBA_SEQ NUMBER, FHBCP_RBA_BNO NUMBER, FHBCP_RBA_BOF NUMBER, FHBCP_ETB RAW(8), FHBHZ NUMBER, FHXCD RAW(16), FHTSN NUMBER, FHTNM VARCHAR2(30), FHRFN NUMBER, FHAFS VARCHAR2(16), FHRFS VARCHAR2(16), FHRFT VARCHAR2(20), HXIFZ NUMBER, HXNRCV NUMBER, HXFNM VARCHAR2(513), FHPOFB NUMBER, FHPNFB NUMBER );
>>>

define 'x$kcvfhtmp' <<<
define table x$kcvfhtmp ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, HTMPXFIL NUMBER, HTMPXONS NUMBER, HTMPXERR NUMBER, HTMPXVER NUMBER, FHTMPSWV NUMBER, FHTMPCVN NUMBER, FHTMPDBI NUMBER, FHTMPDBN VARCHAR2(9), FHTMPCSQ NUMBER, FHTMPFSZ NUMBER, FHTMPBSZ NUMBER, FHTMPFNO NUMBER, FHTMPTYP NUMBER, FHTMPCRS VARCHAR2(16), FHTMPCRT VARCHAR2(20), FHTMPSTA NUMBER, FHTMPCCC NUMBER, FHTMPXCD RAW(16), FHTMPTSN NUMBER, FHTMPTNM VARCHAR2(30), FHTMPRFN NUMBER, HTMPXFNM VARCHAR2(513) );
>>>

define 'x$dual' <<<
DEFINE TABLE x$dual ( ADDR RAW(4), INDX NUMBER, INST_ID NUMBER, DUMMY VARCHAR2(1) );
>>>

define 'v$proxy_datafile' <<<
DEFINE TABLE v$proxy_datafile ( RECID NUMBER, STAMP NUMBER, DEVICE_TYPE VARCHAR2(17), HANDLE VARCHAR2(513), COMMENTS VARCHAR2(81), MEDIA VARCHAR2(65), MEDIA_POOL NUMBER, TAG VARCHAR2(32), STATUS VARCHAR2(1), DELETED VARCHAR2(3), FILE# NUMBER, CREATION_CHANGE# NUMBER, CREATION_TIME DATE, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, CHECKPOINT_CHANGE# NUMBER, CHECKPOINT_TIME DATE, ABSOLUTE_FUZZY_CHANGE# NUMBER, RECOVERY_FUZZY_CHANGE# NUMBER, RECOVERY_FUZZY_TIME DATE, INCREMENTAL_LEVEL NUMBER, ONLINE_FUZZY VARCHAR2(3), BACKUP_FUZZY VARCHAR2(3), BLOCKS NUMBER, BLOCK_SIZE NUMBER, OLDEST_OFFLINE_RANGE NUMBER, START_TIME DATE, COMPLETION_TIME DATE, ELAPSED_SECONDS NUMBER, CONTROLFILE_TYPE VARCHAR2(1), KEEP VARCHAR2(3), KEEP_UNTIL DATE, KEEP_OPTIONS VARCHAR2(13), RMAN_STATUS_RECID NUMBER, RMAN_STATUS_STAMP NUMBER, FOREIGN_DBID NUMBER, PLUGGED_READONLY VARCHAR2(3), PLUGIN_CHANGE# NUMBER, PLUGIN_RESETLOGS_CHANGE# NUMBER, PLUGIN_RESETLOGS_TIME DATE );
>>>

define 'v$proxy_archivedlog' <<<
DEFINE TABLE v$proxy_archivedlog ( RECID NUMBER, STAMP NUMBER, DEVICE_TYPE VARCHAR2(17), HANDLE VARCHAR2(513), COMMENTS VARCHAR2(81), MEDIA VARCHAR2(65), MEDIA_POOL NUMBER, TAG VARCHAR2(32), STATUS VARCHAR2(1), DELETED VARCHAR2(3), THREAD# NUMBER, SEQUENCE# NUMBER, RESETLOGS_CHANGE# NUMBER, RESETLOGS_TIME DATE, FIRST_CHANGE# NUMBER, FIRST_TIME DATE, NEXT_CHANGE# NUMBER, NEXT_TIME DATE, BLOCKS NUMBER, BLOCK_SIZE NUMBER, START_TIME DATE, COMPLETION_TIME DATE, ELAPSED_SECONDS NUMBER, RMAN_STATUS_RECID NUMBER, RMAN_STATUS_STAMP NUMBER, TERMINAL VARCHAR2(3), KEEP VARCHAR2(3), KEEP_UNTIL DATE, KEEP_OPTIONS VARCHAR2(13) );
>>>

define 'x$is_recovery_area_enabled' <<<
function is_recovery_area_enabled return boolean IS
  dbstate number := 0;
begin
  select count(*) into dbstate from x$kccrdi where location is not null;
  if (dbstate = 0) then
    return FALSE;
  end if;
  return TRUE;
end;
>>>

define 'x$aged_files' <<<
procedure refreshAgedFiles is
  busy_retries number := 0;    -- retry counter for ss enqueue busy
  err_msg      varchar2(2048);
begin
  if (NOT is_recovery_area_enabled()) then
    deb('refreshAgedFiles', 'recovery area is not enabled');
    return;
  end if;
  deb('refreshAgedFiles', 'Starting refreshAgedFiles at ' || to_char(sysdate));
  if (NOT krmicd.valRedoLogDeletionPolicy) then
    krmicd.writeMsg(8591);
  end if;
<<retry>>
  begin
    sys.dbms_backup_restore.refreshAgedFiles;
  exception
    when sys.dbms_backup_restore.snapshot_enqueue_busy then
      -- retry up to 5 times, waiting 10 seconds between retries
      if busy_retries = 5 then
        err_msg := sqlerrm;
        krmicd.writeErrMsg(6764, err_msg);
        krmicd.writeMsg(8132);   -- unable to refresh aged files
        krmicd.clearErrors;
      else
        busy_retries := busy_retries + 1;
        krmicd.writeMsg(8512);
        krmicd.sleep(10*busy_retries);
        krmicd.clearErrors;
        goto retry;
      end if;
    when others then
      err_msg := sqlerrm;
      krmicd.writeErrMsg(6764, err_msg);
      krmicd.writeMsg(8132);         -- unable to refresh aged files
      krmicd.clearErrors;
  end;
  deb('refreshAgedFiles', 'Finished refreshAgedFiles at ' ||
      to_char(sysdate));
end;
>>>

#
#Is ArchivedLog Not Applied on standby?
#Returns TRUE if not applied on standby database. Otherwise FALSE.
#Resetlogs Time is in ISO format YYYY-MM-DD HH24:MI:SS
#
#Note that it is not necessary that all archivelogs have the applied bit set.
#If any higher logs have this bit set, then it means all lower archivelogs
#are applied.
#
define 'x$applied_al' <<<
function isalnotapplied(recid            IN number,
                        stamp            IN number,
                        next_change      IN number,
                        resetlogs_change IN number,
                        resetlogs_time   IN varchar2)
return boolean IS
  applied_change number;
  isnotapplied   binary_integer := 1;
begin
  -- Check if APPLIED bit is set for this record.
  select decode(al.applied, 'YES', 0, 1) into isnotapplied
    from v$archived_log al
   where al.recid = isalnotapplied.recid
     and al.stamp = isalnotapplied.stamp
     and al.resetlogs_change# = isalnotapplied.resetlogs_change
     and al.resetlogs_time =
           to_date(isalnotapplied.resetlogs_time,
                   'YYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=Gregorian');
  if (isnotapplied = 1) then
    -- Check if any archivelogs generated after this one have the APPLIED
    -- bit set.
    select nvl(max(al.first_change#), 0) into applied_change
      from v$archived_log al
     where al.applied = 'YES'
       and al.standby_dest = 'NO'
       and al.resetlogs_change# = isalnotapplied.resetlogs_change
       and al.resetlogs_time =
             to_date(isalnotapplied.resetlogs_time,
                     'YYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=Gregorian');
    if (next_change <= applied_change) then
      isnotapplied := 0;
    end if;
  end if;
  if (isnotapplied > 0) then
    return TRUE;
  else
    return FALSE;
  end if;
end;
>>>

#
#Is archivedLog needed for guaranteed restore point?
#Returns TRUE if needed. Otherwise, FALSE. The query defined here must be in
#sync with the query defined in krmkisalforgrsp().
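#
# Illustration only (not used by RMAN itself): a hedged, standalone sketch
# of the SCN-overlap test used below. Assume a guaranteed restore point
# with rspfscn=5000 and rspscn=5200; a log covering
# [first_change, next_change) is still needed when
# first_change <= rspscn + 1 and next_change >= rspfscn, so a log with
# first_change=5100 and next_change=5300 would be retained:
#
#   select count(*)
#     from x$kccrsp grsp
#    where bitand(grsp.rspflags, 1) = 1   -- guaranteed
#      and 5100 <= (grsp.rspscn + 1)      -- log starts early enough
#      and 5300 >= grsp.rspfscn;          -- log ends late enough
#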
#
define 'x$isalforgrsp' <<<
function isalforgrsp(first_change     IN number,
                     next_change      IN number,
                     resetlogs_change IN number,
                     resetlogs_time   IN varchar2)
return boolean is
  count_grsp number;
begin
  select count(*) into count_grsp
    from x$kccrsp grsp, v$database_incarnation dbinc
   where grsp.rspincarn = dbinc.incarnation#
     and bitand(grsp.rspflags, 2) != 0
     and bitand(grsp.rspflags, 1) = 1  -- Guaranteed
     and grsp.rspfscn <= grsp.rspscn   -- filter archived log for clean grsp
     and grsp.rspfscn != 0
     and dbinc.resetlogs_change# = isalforgrsp.resetlogs_change
     and dbinc.resetlogs_time =
           to_date(isalforgrsp.resetlogs_time,
                   'YYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=Gregorian')
           -- KRMS_ISO_DATE_FORMAT format set by RMAN
     and isalforgrsp.first_change <= (grsp.rspscn + 1)
     and isalforgrsp.next_change >= grsp.rspfscn;
  if (count_grsp > 0) then
    return TRUE;
  end if;
  return FALSE;
end;
>>>

#
#Delete DatafileCopy: added for bug-3174292
#
define 'x$del_copy' <<<
procedure del_copy(copy_recid        IN number
                  ,copy_stamp        IN number
                  ,fname             IN varchar2
                  ,dfnumber          IN binary_integer
                  ,resetlogs_change  IN number
                  ,creation_change   IN number
                  ,checkpoint_change IN number
                  ,blksize           IN number
                  ,no_delete         IN binary_integer) IS
begin
  sys.dbms_backup_restore.deleteDataFileCopy(
      recid             => copy_recid,
      stamp             => copy_stamp,
      fname             => fname,
      dfnumber          => dfnumber,
      resetlogs_change  => resetlogs_change,
      creation_change   => creation_change,
      checkpoint_change => checkpoint_change,
      blksize           => blksize,
      no_delete         => no_delete);
  if (dfnumber = 0) then
    krmicd.writeMsg(8072);
    krmicd.writeMsg(8516, fname, to_char(copy_recid), to_char(copy_stamp));
  else
    krmicd.writeMsg(8070);
    krmicd.writeMsg(8513, fname, to_char(copy_recid), to_char(copy_stamp));
  end if;
end;
>>>

#
#Delete Archivelog: added for bug-3174292
#
define 'x$del_log' <<<
procedure del_log(cfisstby         IN boolean
                 ,arch_recid       IN number
                 ,arch_stamp       IN number
                 ,fname            IN varchar2
                 ,thread           IN number
                 ,sequence         IN number
                 ,resetlogs_change IN number
                 ,resetlogs_time   IN varchar2
                 ,first_change     IN number
                 ,blksize          IN number
                 ,next_change      IN number
                 ,first_time       IN OUT boolean
                 ,docopies         IN boolean
                 ,reqscn           IN OUT number  -- required scn
                 ,rlgscn           IN OUT number
                 ,appscn           IN OUT number  -- applied scn
                 ,apprlgscn        IN OUT number
                 ,alldest          IN number
                 ,reqbackups       IN number
                 ,nbackups         IN number) IS
  skiparch_lal  boolean := FALSE;  -- TRUE if log is required locally
  skiparch_ral  boolean := FALSE;  -- TRUE if log is required remotely
  skiparch_nbck boolean := FALSE;  -- TRUE if log is required for backups
  skiparch_grsp boolean := FALSE;
  err_msg       varchar2(2048);
begin
  -- We can delete archivelogs when we are doing copies, because another
  -- copy of the archivelog exists.
  if (not docopies) then
    -- initialise reqscn, rlgscn, appscn, apprlgscn once and return them
    -- to the caller so that they are passed in on every call. Note that
    -- each PL/SQL step can back up logs from one incarnation. So, we can
    -- safely assume that all logs have the same rlgscn.
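    --
    -- Worked example (illustration only, values are hypothetical): with
    -- appscn=1000 and apprlgscn=900, a log of incarnation
    -- resetlogs_change=900 with next_change=1200 fails the applied test
    -- below (1200 > 1000), so skiparch_lal is set and the log is kept
    -- because the standby has not applied it yet; with next_change=800
    -- the log passes that test and, if it also passes the reqscn and
    -- backup-count tests, it becomes eligible for deletion.
    --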
    if (first_time) then
      krmicd.getArchiveLogDeletionSCN(alldest, reqscn, rlgscn,
                                      appscn, apprlgscn);
      deb('del_log', 'reqscn='||reqscn||',rlgscn='||rlgscn);
      deb('del_log', 'appscn='||appscn||',apprlgscn='||apprlgscn);
    end if;
    if ((next_change > appscn and resetlogs_change = apprlgscn) OR
        (resetlogs_change > apprlgscn)) then
      skiparch_lal := TRUE;
      deb('del_log', 'next_change='||next_change||
          ',rlgscn='||resetlogs_change);
    elsif ((next_change > reqscn and resetlogs_change = rlgscn) OR
           (resetlogs_change > rlgscn)) then
      skiparch_ral := TRUE;
      deb('del_log', 'next_change='||next_change||
          ',rlgscn='||resetlogs_change);
    elsif (reqbackups > nbackups) then
      -- if we don't have enough backups to delete the log, don't delete log
      skiparch_nbck := TRUE;
    else
      skiparch_grsp := isalforgrsp(first_change, next_change,
                                   resetlogs_change, resetlogs_time);
    end if;
  end if;

  -- The first time through, print a message
  -- that we are deleting the archived redo logs.
  if (first_time) then
    krmicd.writeMsg(8071, krmicd.getChid);
  end if;
  first_time := FALSE;

  if skiparch_lal then
    krmicd.writeMsg(8120);
    krmicd.writeMsg(8515, fname, to_char(thread), to_char(sequence));
  elsif skiparch_ral then
    krmicd.writeMsg(8137);
    krmicd.writeMsg(8515, fname, to_char(thread), to_char(sequence));
  elsif skiparch_nbck then
    krmicd.writeMsg(8138);
    krmicd.writeMsg(8515, fname, to_char(thread), to_char(sequence));
  elsif skiparch_grsp then
    krmicd.writeMsg(8139);
    krmicd.writeMsg(8515, fname, to_char(thread), to_char(sequence));
  else
    -- Print the identity of the archived redo log
    -- that we are about to delete.
    krmicd.writeMsg(8514, fname, to_char(arch_recid), to_char(arch_stamp));
    begin
      sys.dbms_backup_restore.deleteArchivedLog(
          recid            => arch_recid,
          stamp            => arch_stamp,
          fname            => fname,
          thread           => thread,
          sequence         => sequence,
          resetlogs_change => resetlogs_change,
          first_change     => first_change,
          blksize          => blksize);
    -- bug 1819338:
    -- Allow bual_del to continue on errors.
    exception
      when others then
        err_msg := sqlerrm;
        krmicd.writeMsg(8118);
        krmicd.writeMsg(8515, fname, to_char(thread), to_char(sequence));
        krmicd.writeErrMsg(4005, err_msg);
        krmicd.clearErrors;
    end;
  end if;
end;
>>>

#
#check if database is in noarchivelog mode
#
define 'x$is_db_in_noarchivelog' <<<
function is_db_in_noarchivelog return boolean is
  noarchivelog binary_integer;
begin
  select count(*) into noarchivelog from v$database
   where log_mode = 'NOARCHIVELOG';
  if (noarchivelog = 0) then
    return FALSE;
  else
    return TRUE;
  end if;
end;
>>>

#
# regdb: register a new database
#
define regdb <<<
-- regdb
declare
  db_name     varchar2(8);
  reset_scn   number;
  reset_time  date;
  db_id       number;
  cf_type     binary_integer;
  cf_sequence binary_integer;
begin
  select dbid, name, resetlogs_change#, resetlogs_time
    into db_id, db_name, reset_scn, reset_time
    from v$database;
  dbms_rcvcat.registerDatabase(db_id, db_name, reset_scn, reset_time);
  krmicd.writeMsg(8006, db_name);
end;
>>>

#
# resetdb: register a new database incarnation
#
define resetdb <<<
-- resetdb
declare
  db_name           varchar2(8);
  reset_scn         number;
  reset_time        date;
  parent_reset_scn  number;
  parent_reset_time date;
  db_id             number;
  cf_type           binary_integer;
  cf_sequence       binary_integer;
  dbinc_key         number;
  cfdbinc_key       number;
  db_not_mounted EXCEPTION;
  PRAGMA EXCEPTION_INIT(db_not_mounted, -1507);
  incarnation_key_missing EXCEPTION;
  PRAGMA EXCEPTION_INIT(incarnation_key_missing, -20008);
  incarnation_already_registered EXCEPTION;
  PRAGMA EXCEPTION_INIT(incarnation_already_registered, -20009);
begin
  &object&
%IF% target
  if (dbinc_key is not NULL) then
    sys.dbms_backup_restore.resetdatabase(dbinc_key);
    krmicd.writeMsg(8066, dbinc_key);
  else
    raise incarnation_key_missing;
  end if;
%ENDIF% target
%IF% catalog
  if (dbinc_key is NULL) then
    select dbid, name, resetlogs_change#, resetlogs_time
      into db_id, db_name, reset_scn, reset_time
      from v$database;
    select diprs, to_date(diprc, 'MM/DD/RR HH24:MI:SS',
                          'NLS_CALENDAR=Gregorian')
      into parent_reset_scn, parent_reset_time
      from x$kccdi;
    begin
      dbms_rcvcat.resetDatabase(db_id             => db_id,
                                db_name           => db_name,
                                reset_scn         => reset_scn,
                                reset_time        => reset_time,
                                parent_reset_scn  => parent_reset_scn,
                                parent_reset_time => parent_reset_time);
      dbms_rcvcat.setReason(dbms_rcvcat.RESYNC_REASON_RESET);
      krmicd.writeMsg(8005, db_name);
    exception
      when incarnation_already_registered then
        krmicd.writeMsg(20009);
      when others then
        raise;
    end;
  else
    select upper(value) into db_name from v$parameter
     where name = 'db_name';
    dbms_rcvcat.resetDatabase(dbinc_key, db_name, reset_scn, reset_time,
                              db_id);
    krmicd.writeMsg(8066, dbinc_key);
    begin
      select incarnation# into cfdbinc_key
        from v$database_incarnation
       where resetlogs_change# = reset_scn
         and resetlogs_time    = reset_time;
      sys.dbms_backup_restore.resetDatabase(cfdbinc_key);
    -- all errors other than not mounted are treated as warnings
    exception
      when db_not_mounted then
        krmicd.clearErrors;
      when no_data_found then
        krmicd.writeMsg(6566);
      when others then
        krmicd.writeErrMsg(1005, sqlerrm);
    end;
  end if;
%ENDIF% catalog
end;
>>>

#
# resync: resync recovery catalog from snapshot controlfile or
#         current controlfile. If snapshot, this
#         will update information about tablespaces, datafiles,
#         rollback segments, and rman configuration.
#         Else just the circular record types are accessed.
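#
# Illustration only: the resync below drives off per-section high water
# marks in v$controlfile_record_section; a hedged sketch of the kind of
# query it issues for each circular record type (here the archived logs):
#
#   select record_size, last_recid
#     from v$controlfile_record_section
#    where type = 'ARCHIVED LOG';
#
# Records already seen in an earlier resync (recid at or below the stored
# high water mark, or with a stamp older than the controlfile version
# time) are then skipped.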
#
define 'resync' <<<
-- resync
declare
  &constants&
  CONFIGRESYNC_NO        CONSTANT number := 0;
  CONFIGRESYNC_TORC      CONSTANT number := 1;
  CONFIGRESYNC_TOCF      CONSTANT number := 2;
  CONFIGRESYNC_TORC_TOCF CONSTANT number := 3;
  mount_status    varchar2(7);
  cf_type         varchar2(7) := NULL;
  db_id           number;
  db_name         varchar2(8);
  ldb_unique_name varchar2(30);    -- parameter db_unique_name
  reset_scn       number;          -- db current reset scn
  reset_time      date;            -- db current reset time
  err_msg         varchar2(2048);
  -- flashback time and guaranteed flashback scn
  flashback_time  date := to_date(NULL);
  prior_reset_scn  number;         -- db prior reset scn
  prior_reset_time date;           -- db prior reset time
  snapcf          varchar2(512);   -- snapshot controlfile name
  name            varchar2(512);   -- default snapshot cf name
  cfname          varchar2(512) := NULL;  -- backup cf name
  ckp_scn         number;
  ckp_time        date;
  ckp_cf_seq      number;
  cf_create_time  date;
  getckptscn      number;
  cf_version      date;            -- v$database.version_time
  kccdivts        number;          -- date2stamp(cf_version)
  recid           number;
  high_cp_recid   number;          -- 1
  high_rt_recid   number;          -- 2
  high_le_recid   number;          -- 3
  high_fe_recid   number;          -- 4
  high_fn_recid   number;          -- 5
  high_ts_recid   number;          -- 6
  high_r1_recid   number;          -- 7
  high_rm_recid   number;          -- 8
  high_lh_recid   number;          -- 9
  high_or_recid   number;          -- 10
  high_al_recid   number;          -- 11
  high_bs_recid   number;          -- 12
  high_bp_recid   number;          -- 13
  high_bf_recid   number;          -- 14
  high_bl_recid   number;          -- 15
  high_dc_recid   number;          -- 16
  high_fc_recid   number;          -- 17
  high_cc_recid   number;          -- 18
  high_dl_recid   number;          -- 19
  high_pc_recid   number;          -- 20
  high_bi_recid   number;          -- 21
  -- For the following record sections, we are going to find the high
  -- recid by a SQL query from v$controlfile_record_section because we
  -- query that view anyway to get rec_size.
  high_ic_recid   number;          -- 22 'DATABASE INCARNATION'
  high_rsr_recid  number;          -- 24 'RMAN STATUS RECORDS'
  high_tf_recid   number;
  high_grsp_recid number := 0;
  high_nrsp_recid number := 0;
  shigh_rsr_recid number;
  high_rout_stamp number;  -- in-memory 'RMAN OUTPUT' records high record stamp
  inst_startup_time  DATE;
  inst_startup_stamp number;
  high_bcr_recid  number;          -- 'DATABASE BLOCK CORRUPTION'
  low_bcr_recid   number;
  full_resync     boolean;         -- set by rman compiler
  implicit        boolean;         -- set by rman compiler
  debug_resync    boolean;         -- set by rman compiler
  converted_cf    boolean;         -- set by rman compiler
  read_retries    number := 0;     -- retry counter for inconsistent read
  busy_retries    number := 0;     -- retry counter for ss enqueue busy
  sort_retries    number := 0;     -- retry counter for sort_area_size
  sync_retries    number := 0;     -- retry counter for automatic resync
  conf_toCF       boolean := FALSE; -- flag that tells how conf is resynced
  first           boolean;
  force           boolean;         -- flag that tells whether we force resync
  rc_aux_fname    varchar2(1025);  -- aux_name from recovery catalog
  rbs_count       number := NULL;
  b               boolean;
  read_only       number;
  ret             number;
  sort_area_size  number;          -- sort_area_size from v$parameter
  sort_area_size_init CONSTANT number := 10485760;
  sort_area_size_incr CONSTANT number := 10485760;
  rec_size        number;  -- record_size from v$controlfile_record_section
  total_recs      number;
  high_recno      number;
  rec_per_chunk   integer;
  high            number;
  low             number;
  found           number;
  releasecf       boolean := FALSE; -- flag that tells us to release the
                                    -- cfile enqueue
  parent_dbinc_key number;
  low_bs_recid    number;
  low_bdf_recid   number;
  low_bsf_recid   number;
  low_brl_recid   number;
  local_low       number;
  local_high      number;
  dbtype          number := rman_constant.DEBUG_RESYNC;
  running_found   boolean;         -- flag used in resync of V$RMAN_STATUS
  resyncstamp     number := 0;     -- resync above this timestamp
  min_stamp       number;          -- minimum timestamp
  max_stamp       number;          -- maximum timestamp
  high_stamp      number;          -- to move high timestamp
  new_min_stamp   number;          -- retry using this minimum timestamp
  until_stamp     number;          -- check until this timestamp
  wasresyncstamp  number;          -- was resynced timestamp
  middle          number;          -- for binary search
  no_units        number;          -- number of units between min-max stamp
  maxdups         number := 16;    -- look for this many duplicates
  lh_lowscn       number;          -- low_scn of log history resynced
  --
  -- This is the unit between min-max stamp in fractions of a day.
  -- You can change this to tune the granularity of the previous resync
  -- stamp. For example, 1 for day granularity, 24 for hour granularity,
  -- 24 * 60 for minute granularity, 24 * 60 * 60 for second granularity,
  -- and so on. This is the tradeoff between performance and precision.
  --
  unit            number := 2;     -- 12 hours granularity
  least_stamp     CONSTANT number := 0;
  greatest_stamp  CONSTANT number := 2**32;
  resync_reason   number;
  fullResyncBaseMsg number;
  resync_active   boolean;
  resync_valid    boolean;
  resync_added    number;
  resync_dropped  number;
  resync_changed  number;
  resync_recreated number;
  resync_renamed  number;
  resync_resized  number;
  type rspname_t is table of x$kccrsp.rspname%type index by binary_integer;
  type numTab_t   is table of number index by binary_integer;
  type timeTab_t  is table of date index by binary_integer;
  type boolTab_t  is table of varchar2(3) index by binary_integer;
  for_dbuname     varchar2(30);
  source_cs       varchar2(512);
  dest_cs         varchar2(512);
  for_db_id       number;
  ub4_cf_type     binary_integer := 0;
  null_retVal     varchar2(1);
  auto_prim_resync boolean := FALSE;
  need_primary_resync exception;
  pragma exception_init(need_primary_resync, -20079);
  db_id_mismatch exception;
  pragma exception_init(db_id_mismatch, -20109);
  sort_area_too_small EXCEPTION;
  PRAGMA EXCEPTION_INIT(sort_area_too_small, -1220);
  resync_not_needed exception;
  pragma exception_init(resync_not_needed, -20034);
  change_record_stamp exception;
  pragma exception_init(change_record_stamp, -20081);
  dbuname_mismatch exception;
  pragma exception_init(dbuname_mismatch, -20223);

  -- Each time a backup controlfile is mounted, the version_time is updated.
  -- We rely on this to decide when to ignore the high water marks. As of
  -- 8.1.6, we also rely on the version_time to filter out old circular
  -- records. Old ones are the ones that existed when the controlfile
  -- was made into a backup. These may be stale records if the user has
  -- since deleted them from the recovery catalog. We do not want to process
  -- such records because they might pollute the recovery catalog. If the
  -- backup controlfile was created by RMAN, then we already resync those
  -- records anyway.
  --
  cursor rs is
    select type, last_recid from v$controlfile_record_section;

  -- Bug 1058691: the old form of the ts query did not handle the case where
  -- a datafile was added to a tablespace AFTER the system clock was reset
  -- to a time earlier than when the tablespace was created. In that case,
  -- min(scn) and min(time) would no longer refer to the same row when the
  -- v$datafile table was aggregated on group(ts#).
  -- Bug 1887868: the new form of this query did fix bug 1058691, but performs
  -- very poorly when there are a large number of tablespaces and datafiles.
  -- The two things that improve performance the most are using x$kccfe
  -- instead of v$datafile and eliminating the nested queries.
  -- Use this form of the query when client-side pl/sql is fixed to support
  -- analytic functions.
  -- It is equivalent to the one below and easier to read:
  --
  --   select ts.ts#,
  --          ts.name,
  --          ts.included_in_database_backup,
  --          to_number(fe.fecrc_scn) create_scn,
  --          to_date(fe.fecrc_tim,'MM/DD/RR HH24:MI:SS',
  --                  'NLS_CALENDAR=Gregorian') create_time
  --     from x$kccfe fe,
  --          (select distinct ts.*,
  --                  first_value(fenum) over
  --                    (partition by fe.fetsn order by to_number(fe.fecps))
  --                  fenum
  --             from x$kccfe fe, v$tablespace ts
  --            where fe.fetsn=ts.ts# and fe.fedup<>0) ts
  --    where ts.fenum=fe.fenum
  --    order by ts.ts#
  --
  -- In the following query:
  -- subquery ts2 is v$tablespace, plus the file# of the file with the lowest
  -- creation SCN of any file, for each tablespace.
  --
  -- subquery ts1 is v$tablespace, plus the lowest creation SCN of any file,
  -- for each tablespace.
  --
  -- alias names are numbered from the inside out.
  cursor ts is
    select ts2.ts#, ts2.name, ts2.included_in_database_backup,
           to_number(fe2.fecrc_scn) create_scn,
           to_date(fe2.fecrc_tim,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') create_time,
           ts2.bigfile bigfile, 'NO' temporary, ts2.encrypt_in_backup,
           fe2.feplus plugin_scn
      from x$kccfe fe2,
           (select ts1.ts#, ts1.name, ts1.included_in_database_backup,
                   min(fe1.fenum) fenum, ts1.bigfile bigfile,
                   ts1.encrypt_in_backup
              from x$kccfe fe1,
                   (select ts.ts#, ts.name, ts.included_in_database_backup,
                           min(to_number(fe.fecrc_scn)) create_scn,
                           ts.bigfile bigfile, ts.encrypt_in_backup
                      from x$kccfe fe, v$tablespace ts
                     where fe.fetsn=ts.ts#
                       and fe.fedup<>0
                     group by ts.ts#, ts.name,
                              ts.included_in_database_backup,
                              ts.bigfile, ts.encrypt_in_backup
                   ) ts1
             where ts1.ts#=fe1.fetsn
               and to_number(fe1.fecrc_scn) = ts1.create_scn
             group by ts1.ts#, ts1.name, ts1.included_in_database_backup,
                      ts1.bigfile, ts1.encrypt_in_backup
           ) ts2
     where ts2.fenum=fe2.fenum
       and (fe2.fefdb = 0 or fe2.feplus != 0)
    union all
    select ts2.ts#, ts2.name, ts2.included_in_database_backup,
           to_number(tf2.tfcrc_scn) create_scn,
           to_date(tf2.tfcrc_tim,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') create_time,
           ts2.bigfile bigfile, 'YES' temporary, ts2.encrypt_in_backup,
           0 plugin_scn
      from x$kcctf tf2,
           (select ts1.ts#, ts1.name, ts1.included_in_database_backup,
                   min(tf1.tfnum) tfnum, ts1.bigfile bigfile,
                   ts1.encrypt_in_backup
              from x$kcctf tf1,
                   (select ts.ts#, ts.name, ts.included_in_database_backup,
                           min(to_number(tf.tfcrc_scn)) create_scn,
                           ts.bigfile bigfile, ts.encrypt_in_backup
                      from x$kcctf tf, v$tablespace ts
                     where tf.tftsn = ts.ts#
                       and tf.tfdup != 0
                       and bitand(tf.tfsta, 32) != 32
                     group by ts.ts#, ts.name,
                              ts.included_in_database_backup,
                              ts.bigfile, ts.encrypt_in_backup
                   ) ts1
             where ts1.ts#=tf1.tftsn
               and to_number(tf1.tfcrc_scn) = ts1.create_scn
             group by ts1.ts#, ts1.name, ts1.included_in_database_backup,
                      ts1.bigfile, ts1.encrypt_in_backup
           ) ts2
     where ts2.tfnum=tf2.tfnum
     order by 1;

  -- When a current controlfile is made into a backup:
  --   all stop scns are cleared
  --   all SOR bits are cleared, and the WCC bit is set if the SOR bit was on.
  --
  -- This query is only used during full resyncs, which are only taken from
  -- backup controlfiles. That is why we use the checkpoint scn (fecps)
  -- for the stop scn rather than the stop scn (fests), and check the WCC
  -- bit rather than the SOR bit.
  --
  -- Also note that we don't use the absence of the CGE bit to determine if
  -- a file is read-only, because we always check the WCC bit first, which
  -- will never be set concurrently with the CGE bit.
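  --
  -- Illustration only: KCCFEWCC is bit 4096 of x$kccfe.festa, so a hedged,
  -- standalone form of the "was clean" test used by cursor df below is:
  --
  --   select fenum,
  --          decode(bitand(festa, 4096), 0, 'NO', 'YES') was_clean
  --     from x$kccfe
  --    where fedup != 0;   -- skip dropped files, as the cursors here do
  --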
  --
  cursor df(low_fno number, high_fno number) IS
    SELECT fenum fileno, to_number(fe.fecrc_scn) create_scn,
           to_date(fe.fecrc_tim,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') create_time,
           fe.fecrc_thr create_thread, fe.fecsz create_size,
           fe.fetsn tsnum, fn.fnnam fname, fh.fhfsz fsize,
           fe.febsz block_size, to_number(fe.feofs) offline_scn,
           to_number(fe.feonc_scn) online_scn,
           to_date(fe.feonc_tim,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') online_time,
           to_number(fe.fecps) stop_scn,
           to_date(fe.festt, 'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') stop_time,
           to_number(bitand(fe.festa, 4096)) clean_flag,
                                         -- This is the KCCFEWCC bit
           to_number(bitand(fe.festa, 4)) read_enabled_flag,
           to_number(bitand(fe.festa, 64)) missing_file_flag,
                                         -- this is KCCFECKD
           fe.ferfn rfileno,             -- this is relative file number
           decode(fe.fepax, 0    , 'UNKNOWN'
                          , 65535, 'NONE'
                          , fnaux.fnnam) aux_fname,
           fe.fepdi foreign_dbid, fe.fefcrs foreign_create_scn,
           fe.fefcrt foreign_create_time,
           decode(fe.fefdb, 1, 'YES', 'NO') plugged_readonly,
           fe.feplus plugin_scn, fe.feprls plugin_reset_scn,
           fe.feprlt plugin_reset_time
      FROM x$kccfe fe, x$kccfn fn, x$kccfn fnaux, x$kcvfh fh
     WHERE fe.fepax = fnaux.fnnum(+)
       AND fn.fnfno=fe.fenum
       AND fn.fnfno=fh.hxfil
       AND fe.fefnh=fn.fnnum
       AND fe.fedup<>0
       AND fnaux.fntyp(+)=22
       AND fn.fntyp=4
       AND fn.fnnam IS NOT NULL
       AND fe.fenum between low_fno and high_fno
     ORDER BY fe.fenum;

  cursor tf(low_fno number, high_fno number) IS
    SELECT tfnum fileno, to_number(tf.tfcrc_scn) create_scn,
           to_date(tf.tfcrc_tim,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') create_time,
           tf.tftsn tsnum, fn.fnnam fname,
           decode(nvl(fh.fhtmpfsz, 0), 0, tf.tfcsz, fh.fhtmpfsz) fsize,
           tf.tfbsz block_size,
           tf.tfrfn rfileno,             -- this is relative file number
           decode(bitand(tf.tfsta, 16), 0, 'OFF', 'ON') autoextend,
           tf.tfmsz max_size, tf.tfnsz next_size
      FROM x$kcctf tf, x$kccfn fn, x$kcvfhtmp fh
     WHERE fn.fnfno=tf.tfnum
       AND fn.fnfno=fh.htmpxfil
       AND tf.tffnh=fn.fnnum
       AND tf.tfdup!=0
       AND fn.fntyp=7
       AND bitand(tf.tfsta, 32)!=32
       AND fn.fnnam IS NOT NULL
       AND tf.tfnum between low_fno and high_fno
     ORDER BY tf.tfnum;

  cursor rt is
    select rtnum thread#, rtseq last_sequence#,
           to_number(rtenb) enable_scn,
           to_date(rtets,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') enable_time,
           to_number(rtdis) disable_scn,
           to_date(rtdit,'MM/DD/RR HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') disable_time,
           decode(bitand(rtsta,67), 0, 'D', 2, 'E', 3, 'E', 66, 'I') status
      from x$kccrt rt, x$kcctir tr
     where rtnlf != 0                    -- groups is not zero
       and tr.inst_id = rt.inst_id       -- join with x$kcctir
       and tirnum = rtnum
       and rt.inst_id = USERENV('Instance')  -- belongs to this instance
     order by rtnum;

  --
  -- Skip resyncing OMF template online redolog names.
  -- This has to be investigated later for fixing standby controlfile names.
  --
  cursor orl is
    select thread#, group#, member fname, bytes, type
      from (select l.thread#, lf.group#, lf.member, l.bytes, 'ONLINE' type
              from v$log l, v$logfile lf
             where l.group# = lf.group#
               and l.thread# != 0        -- skip unassigned threads
               and nvl(lf.status, 'FOO') not in
                     ('INVALID', 'DELETED', 'UNKNOWN')
            union all
            select l.thread#, lf.group#, lf.member, l.bytes, 'STANDBY' type
              from v$standby_log l, v$logfile lf
             where l.group# = lf.group#
               and l.thread# != 0        -- skip unassigned threads
               and nvl(lf.status, 'FOO') not in
                     ('INVALID', 'DELETED', 'UNKNOWN'))
     order by nlssort(member, 'NLS_COMP=ANSI NLS_SORT=ASCII7'); -- bug 2107554

  -- Both Guaranteed and Preserved restore points live in grsp catalog table
  cursor grsp is
    select rspname, rspfscn from_scn, rspscn to_scn,
           to_date(rsprsptime, 'MM/DD/YYYY HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') rsprsptime,
           to_date(rsptime, 'MM/DD/YYYY HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') rsptime,
           decode(bitand(rspflags, 1), 1, 'YES', 'NO') guaranteed,
           resetlogs_change#, resetlogs_time
      from x$kccrsp, v$database_incarnation
     where rspincarn = incarnation#
       and bitand(rspflags, 2) != 0
     order by nlssort(rspname, 'NLS_COMP=ANSI NLS_SORT=ASCII7');

  cursor nrsp(low number, high number) is
    select nrsname, nrsrid recid, nrsstm stamp, nrsincarn, nrsscn,
           to_date(nrsrsptime, 'MM/DD/YYYY HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') nrsrsptime,
           to_date(nrstime, 'MM/DD/YYYY HH24:MI:SS',
                   'NLS_CALENDAR=Gregorian') nrstime,
           decode(bitand(nrsflags, 2), 2, 0, 1) deleted,
           resetlogs_change# reset_scn, resetlogs_time reset_time
      from x$kccnrs r, v$database_incarnation d
     where r.nrsincarn = d.incarnation#
       and nrsrid between low and high
       and (nrsstm >= kccdivts OR nrsrid = high)
       and nrsstm >= resyncstamp
     order by nrsrid;

  -- kccdivts is not used here because the log history ranges are always
  -- valid without regard to the type of controlfile (backup or current).
  -- If we get a duplicate log history, it will be re-validated. The same
  -- is true for the offr cursor below.
  cursor rlh(low_recid number, high_recid number) is
    select recid, stamp, thread#, sequence#, first_change# low_scn,
           first_time low_time, next_change# next_scn,
           resetlogs_change#, resetlogs_time
      from v$log_history
     where recid between low_recid and high_recid
       and resetlogs_time is not null    -- bug 3125145
       and stamp >= resyncstamp
     order by recid;

  -- NB: The circular record type cursors below filter out records
  -- whose stamp is < kccdivts. dbms_rcvcat ignored such records, so
  -- there is no point in passing them to dbms_rcvcat. Refer to
  -- prvtrvct.sql for the reason why these are ignored. However, we
  -- always pass the record at the high_recid slot so that dbms_rcvcat
  -- can set the high water mark.
  cursor al(low_recid number, high_recid number, cf_type varchar2) is
    select recid, stamp, name, thread#, sequence#,
           resetlogs_change#, resetlogs_time, first_change#, first_time,
           next_change#, next_time, blocks, block_size,
           decode(archived, 'YES', 'Y', 'NO', 'N', 'UNKNOWN') archived,
           status, completion_time,
           decode(registrar, 'RFS', 'Y', 'SRMN', 'Y', 'RMAN', 'N', 'N')
             is_standby,
           dictionary_begin, dictionary_end, is_recovery_dest_file,
           compressed, creator,
           decode(end_of_redo_type, 'TERMINAL', 'YES', 'NO') terminal
      from v$archived_log
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and standby_dest = 'NO'
       and stamp >= resyncstamp
     order by recid;

  -- This cursor does an outer join of v$offline_range to x$kccfe.
  -- The reason is that x$kccfe does not show datafiles that have been
  -- dropped and whose file# was reused.
  -- It is possible that an offline range record belongs to a dropped
  -- datafile. In this case, we cannot know the creation SCN of the
  -- datafile that owns the offline range. So we make it zero and
  -- insert it into the recovery catalog that way. Such ranges are never
  -- used. It is also possible that an offline range belongs to a
  -- dropped datafile, and for that datafile number to have been reused.
  -- To avoid associating the offline range with the wrong datafile
  -- incarnation we require that the datafile creation scn be < the offline
  -- range end. We use the offline range end rather than the start because
  -- of issues relating to V7->V8 migration of offline tablespaces, and
  -- because of TSPITR of read-only tablespaces. Under these circumstances,
  -- the datafile creation SCN may be > than the offline range start.
  -- kccdivts is not used - See comments for rlh cursor for why we don't use
  -- kccdivts here...
  cursor offr(low_recid number, high_recid number) is
    select /*+ first_rows */ offr.recid, offr.stamp, offr.file#,
           to_number(fe.fecrc_scn) creation_change#,
           offr.offline_change#, offr.online_change#, offr.online_time,
           offr.resetlogs_change#, offr.resetlogs_time
      from v$offline_range offr, x$kccfe fe
     where offr.file# = fe.fenum(+)
       and offr.online_change# > to_number(fe.fecrc_scn(+))
       and (offr.recid between low_recid and high_recid
            or fe.fecrc_scn is null)    -- bug 3408643
       and offr.resetlogs_time is not null  -- bug 3125145
       and fe.fedup != 0      -- we are not interested in dropped files
       and stamp >= resyncstamp
     order by offr.recid;

  cursor bs(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, backup_type,
           incremental_level, pieces, start_time, completion_time,
           controlfile_included, input_file_scan_only, keep_until,
           decode (keep_options, 'LOGS'       , KEEP_LOGS
                               , 'NOLOGS'     , KEEP_NOLOGS
                               , 'BACKUP_LOGS', KEEP_CONSIST
                               , 0) keep_options,
           block_size, multi_section
      from v$backup_set
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor bp(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, piece#, copy#, tag,
           device_type, handle, comments, media, media_pool, concur,
           start_time, completion_time, status, bytes,
           is_recovery_dest_file, rman_status_recid, rman_status_stamp,
           compressed, encrypted, backed_by_osb
      from v$backup_piece
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor bdf(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, file#,
           creation_change#, creation_time, resetlogs_change#,
           resetlogs_time, incremental_level, incremental_change#,
           checkpoint_change#, checkpoint_time, absolute_fuzzy_change#,
           datafile_blocks, blocks, block_size, oldest_offline_range,
           completion_time, controlfile_type, marked_corrupt,
           media_corrupt, logically_corrupt, blocks_read,
           used_change_tracking, used_optimization, foreign_dbid,
           plugged_readonly, plugin_change#, plugin_resetlogs_change#,
           plugin_resetlogs_time, section_size
      from v$backup_datafile
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  -- resync the bdf records whose bs changed and not resynced by cursor bdf
  cursor bdfbs(low_recid number, high_recid number,
               low_bs_recid number, high_bs_recid number) is
    select bdf.recid, bdf.stamp, bdf.set_stamp, bdf.set_count, bdf.file#,
           bdf.creation_change#, bdf.creation_time,
           bdf.resetlogs_change#, bdf.resetlogs_time,
           bdf.incremental_level, bdf.incremental_change#,
           bdf.checkpoint_change#, bdf.checkpoint_time,
           bdf.absolute_fuzzy_change#, bdf.datafile_blocks, bdf.blocks,
           bdf.block_size, bdf.oldest_offline_range, bdf.completion_time,
           bdf.controlfile_type, bdf.marked_corrupt, bdf.media_corrupt,
           bdf.logically_corrupt, bdf.blocks_read,
           bdf.used_change_tracking, bdf.used_optimization,
           bdf.foreign_dbid, bdf.plugged_readonly, bdf.plugin_change#,
           bdf.plugin_resetlogs_change#, bdf.plugin_resetlogs_time,
           bdf.section_size
      from v$backup_datafile bdf, v$backup_set bs
     where bs.recid between low_bs_recid and high_bs_recid
       and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid)
       and bs.stamp >= resyncstamp
       and bdf.set_stamp = bs.set_stamp
       and bdf.set_count = bs.set_count
       and bs.backup_type != 'L'        -- ignore archivelog backups
       and bdf.recid between low_recid and high_recid
     order by bdf.recid;

  cursor bsf(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, modification_time,
           bytes, completion_time, db_unique_name
      from v$backup_spfile
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
       and modification_time is not null
       and completion_time is not null
       and bytes is not null
     order by recid;

  -- resync the bsf records whose bs changed and not resynced by cursor bsf
  cursor bsfbs(low_recid number, high_recid number,
               low_bs_recid number, high_bs_recid number) is
    select bsf.recid, bsf.stamp, bsf.set_stamp, bsf.set_count,
           bsf.modification_time, bsf.bytes, bsf.completion_time,
           bsf.db_unique_name
      from v$backup_spfile bsf, v$backup_set bs
     where bs.recid between low_bs_recid and high_bs_recid
       and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid)
       and bs.stamp >= resyncstamp
       and bsf.set_stamp = bs.set_stamp
       and bsf.set_count = bs.set_count
       and bsf.modification_time is not null
       and bsf.completion_time is not null
       and bsf.bytes is not null
       and bs.backup_type != 'L'        -- ignore archivelog backups
       and bsf.recid between low_recid and high_recid
     order by bsf.recid;

  cursor bcb(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, piece#, file#, block#,
           blocks, corruption_change#, marked_corrupt, corruption_type
      from v$backup_corruption
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor brl(low_recid number, high_recid number) is
    select recid, stamp, set_stamp, set_count, thread#, sequence#,
           resetlogs_change#, resetlogs_time, first_change#, first_time,
           next_change#, next_time, blocks, block_size, terminal
      from v$backup_redolog
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  -- resync the brl records whose bs changed and not resynced by cursor brl
  cursor brlbs(low_recid number, high_recid number,
               low_bs_recid number, high_bs_recid number) is
    select brl.recid, brl.stamp, brl.set_stamp, brl.set_count,
           brl.thread#, brl.sequence#, brl.resetlogs_change#,
           brl.resetlogs_time, brl.first_change#, brl.first_time,
           brl.next_change#, brl.next_time, brl.blocks, brl.block_size,
           brl.terminal
      from v$backup_redolog brl, v$backup_set bs
     where bs.recid between low_bs_recid and high_bs_recid
       and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid)
       and bs.stamp >= resyncstamp
       and brl.set_stamp = bs.set_stamp
       and brl.set_count = bs.set_count
       and bs.backup_type = 'L'         -- only archivelog backups
       and brl.recid between low_recid and high_recid
     order by brl.recid;

  cursor cdf(low_recid number, high_recid number) is
    select recid, stamp, name fname, tag, file#,
           creation_change# create_scn, creation_time create_time,
           resetlogs_change# reset_scn, resetlogs_time reset_time,
           incremental_level incr_level, checkpoint_change# ckp_scn,
           checkpoint_time ckp_time, absolute_fuzzy_change# abs_fuzzy_scn,
           online_fuzzy, backup_fuzzy,
           recovery_fuzzy_change# rcv_fuzzy_scn,
           recovery_fuzzy_time rcv_fuzzy_time, blocks, block_size,
           oldest_offline_range, status, completion_time,
           controlfile_type, keep_until,
           decode (keep_options, 'LOGS'       , KEEP_LOGS
                               , 'NOLOGS'     , KEEP_NOLOGS
                               , 'BACKUP_LOGS', KEEP_CONSIST
                               , 0) keep_options,
           scanned, is_recovery_dest_file, rman_status_recid,
           rman_status_stamp, marked_corrupt, foreign_dbid,
           plugged_readonly, plugin_change#, plugin_resetlogs_change#,
           plugin_resetlogs_time
      from v$datafile_copy
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor xdf(low_recid number, high_recid number) is
    select recid, stamp, tag, file#, creation_change# create_scn,
           creation_time create_time, resetlogs_change# reset_scn,
           resetlogs_time reset_time, incremental_level incr_level,
           checkpoint_change# ckp_scn, checkpoint_time ckp_time,
           absolute_fuzzy_change# abs_fuzzy_scn, online_fuzzy,
           backup_fuzzy, recovery_fuzzy_change# rcv_fuzzy_scn,
           recovery_fuzzy_time rcv_fuzzy_time, blocks, block_size,
           oldest_offline_range, device_type, handle, comments, media,
           media_pool, status, start_time, completion_time,
           controlfile_type, keep_until,
           decode (keep_options, 'LOGS'       , KEEP_LOGS
                               , 'NOLOGS'     , KEEP_NOLOGS
                               , 'BACKUP_LOGS', KEEP_CONSIST
                               , 0) keep_options,
           rman_status_recid, rman_status_stamp, foreign_dbid,
           plugged_readonly, plugin_change#, plugin_resetlogs_change#,
           plugin_resetlogs_time
      from v$proxy_datafile
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor xal(low_recid number, high_recid number) is
    select recid, stamp, tag, thread#, sequence#, resetlogs_change#,
           resetlogs_time, first_change#, first_time, next_change#,
           next_time, blocks, block_size, device_type, handle, comments,
           media, media_pool, status, start_time, completion_time,
           rman_status_recid, rman_status_stamp, terminal, keep_until,
           decode (keep_options, 'LOGS'       , KEEP_LOGS
                               , 'NOLOGS'     , KEEP_NOLOGS
                               , 'BACKUP_LOGS', KEEP_CONSIST
                               , 0) keep_options
      from v$proxy_archivedlog
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor ccb(low_recid number, high_recid number) is
    select recid, stamp, copy_recid, copy_stamp, file#, block#, blocks,
           corruption_change#, marked_corrupt, corruption_type
      from v$copy_corruption
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor bcr(low_recid number, high_recid number) is
    select blkrid recid, blkstm stamp, blkfno file#, blkcrs create_scn,
           blkcrt create_time, blksblk block#, blktot blocks,
           blkscn corrupt_scn,
           decode(blktype, 2, 'ALL ZERO', 3, 'FRACTURED', 4, 'CHECKSUM',
                           5, 'CORRUPT', 6, 'NOLOGGING',
                           'UNKNOWN') corruption_type
      from x$kccblkcor
     where blkrid between low_recid and high_recid
       and blkstm >= resyncstamp
       and blktype != 1                 -- skip deleted records
     order by blkrid;

  -- Cursor for getting all deleted objects. Note that a deleted object
  -- with name "DATAFILE RENAME ON RESTORE" or "TEMPFILE RENAME" has in
  -- object_data the file number of the file whose name is changed.
  cursor dl(low_recid number, high_recid number) is
    select recid, stamp, type object_type, object_recid, object_stamp,
           object_data, type,
           (case when type = 'DATAFILE RENAME ON RESTORE' OR
                      type = 'PLUGGED READONLY RENAME'
                 then df.name
                 when type = 'TEMPFILE RENAME'
                 then tf.name
                 else to_char(null)
            end) object_fname,
           (case when type = 'DATAFILE RENAME ON RESTORE'
                 then df.creation_change#
                 when type = 'PLUGGED READONLY RENAME'
                 then df.plugin_change#
                 when type = 'TEMPFILE RENAME'
                 then tf.creation_change#
                 else to_number(null)
            end) object_create_scn,
           set_stamp, set_count
      from v$deleted_object,
           (select fe.fenum file#, to_number(fe.fecrc_scn) creation_change#,
                   fe.feplus plugin_change#, fn.fnnam name
              from x$kccfe fe, x$kccfn fn
             where fe.fenum = fn.fnfno
               and fe.fefnh = fn.fnnum
               and fe.fedup != 0
               and fn.fntyp = 4
               and fn.fnnam is not null
               and bitand(fn.fnflg, 4) != 4) df,
           (select tf.tfnum file#, to_number(tf.tfcrc_scn) creation_change#,
                   fn.fnnam name
              from x$kcctf tf, x$kccfn fn
             where tf.tfnum = fn.fnfno
               and tf.tffnh = fn.fnnum
               and tf.tfdup != 0
               and bitand(tf.tfsta, 32) != 32
               and fn.fntyp = 7
               and fn.fnnam is not null) tf
     where object_data = df.file#(+)
       and object_data = tf.file#(+)
       and recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor ic(low_recid number, high_recid number) is
    select resetlogs_change#, resetlogs_time, prior_resetlogs_change#,
           prior_resetlogs_time
      from v$database_incarnation
     where incarnation# between low_recid and high_recid
     order by resetlogs_change#;        -- resync parent first

  -- bug 5247609 add rule hint to V$RMAN_STATUS query
  cursor rsr(low_recid number, high_recid number) is
    select /*+ rule */ recid, stamp, parent_recid, parent_stamp,
           session_recid, session_stamp, row_level, row_type, command_id,
           operation, status, mbytes_processed, start_time, end_time,
           input_bytes, output_bytes, optimized, object_type,
           output_device_type, osb_allocated
      from v$rman_status
     where recid between low_recid and high_recid
       and (stamp >= kccdivts OR recid = high_recid)
       and stamp >= resyncstamp
     order by recid;

  cursor rout(low_stamp number) is
    select recid, stamp, session_recid, session_stamp,
           rman_status_recid, rman_status_stamp, output
      from v$rman_output
     where stamp >= low_stamp
       and stamp >= resyncstamp
     order by session_recid, rman_status_recid;

  -- cursor to find out duplicate records
  cursor duprec_c(low_stamp number, high_stamp number) is
    select recid, stamp, 'AL' type
      from v$archived_log
     where status != 'D'
       and standby_dest = 'NO'
       and archived = 'YES'
       and stamp > low_stamp
       and stamp <= high_stamp
    union all
    select recid, stamp, 'BP' type
      from v$backup_piece
     where status != 'D'
       and stamp > low_stamp
       and stamp <= high_stamp
    union all
    select recid, stamp, 'DC' type
      from v$datafile_copy
     where status != 'D'
       and stamp > low_stamp
       and stamp <= high_stamp
     order by stamp desc;

  procedure deb_sort_area(max_high IN number) is
  begin
    -- maybe later we will support debug levels
    if debug_resync then
      deb('resync', 'sort_area_size='||sort_area_size, dbtype,
          rman_constant.LEVEL_HI);
      deb('resync', 'rec_size='||rec_size, dbtype, rman_constant.LEVEL_HI);
      deb('resync', 'rec_per_chunk='||rec_per_chunk, dbtype,
          rman_constant.LEVEL_HI);
      deb('resync', 'low= '||low||' max_high='||max_high, dbtype,
          rman_constant.LEVEL_HI);
      deb('resync', 'total_recs='||total_recs, dbtype,
          rman_constant.LEVEL_HI);
    end if;
  end;

  function set_sort_area_size(newsize in number) return number is
    s          number;
    memmgmt    number;
    maxrecsize number;
    inststatus varchar2(12);
  begin
    select count(*) into memmgmt from v$parameter
     where name='workarea_size_policy' and upper(value)='AUTO';
    krmicd.clearErrors;
    if memmgmt = 0 then
      -- manual memory management
      deb('resync', 'manual memory management', dbtype);
      if newsize is not null then
        krmicd.execSql('alter session set sort_area_size=' ||
                       to_char(newsize));
        krmicd.clearErrors;
      end if;
      select to_number(value) into s from v$parameter
       where name='sort_area_size';
      if s < newsize then
        krmicd.writeMsg(1005, 'could not set sort_area_size to '||
                        to_char(newsize) || ', using ' || to_char(s) ||
                        ' instead.');
      end if;
    else
      -- automatic memory management
      -- No need to adjust sort_area_size because it is automatic
      -- memory management. We must never spill to disk.
      select current_size into s from v$memory_dynamic_components
       where component='PGA Target';
      deb('resync', 'Instance using automatic memory management, PGA Target size ' || s, dbtype);
      -- 9363515 Possibly due to memory mgmt issue but current_size was 0.
      -- Although a corner case, we cannot always rely on PGA Target size.
      select status into inststatus from v$instance;
      if inststatus in ('MOUNTED', 'OPEN') then
        select max(record_size) into maxrecsize
          from v$controlfile_record_section;
      else
        -- The max record_size from 11.2 is 8180 bytes, round up to 12k
        maxrecsize := 12288;
      end if;
      if s < maxrecsize then
        krmicd.writeMsg(1005, 'Cannot use PGA Target dynamic memory current_size ' || to_char(s) || ', using ' || to_char(maxrecsize) || ' instead.');
        s := maxrecsize;
      end if;
    end if;
    return s;
  end set_sort_area_size;

  -- The following procedure resyncs all rman configuration records
  -- into recovery catalog.
  procedure resyncConf2Catalog(cf_type IN varchar2, current_cf IN boolean) IS
    -- cursors for RMAN configuration
    cursor conf_c is
      select conf#, name, value from v$rman_configuration;
  begin
    high_rm_recid := null;
    if current_cf then
      sys.dbms_backup_restore.getCkpt(getckptscn
                                     ,high_cp_recid
                                     ,high_rt_recid
                                     ,high_le_recid
                                     ,high_fe_recid
                                     ,high_fn_recid
                                     ,high_ts_recid
                                     ,high_r1_recid
                                     ,high_rm_recid
                                     ,high_lh_recid
                                     ,high_or_recid
                                     ,high_al_recid
                                     ,high_bs_recid
                                     ,high_bp_recid
                                     ,high_bf_recid
                                     ,high_bl_recid
                                     ,high_dc_recid
                                     ,high_fc_recid
                                     ,high_cc_recid
                                     ,high_dl_recid
                                     ,high_pc_recid
                                     ,high_bi_recid
                                     );
    end if;
    -- Cleanup recovery catalog. Note that this will also clean up all
    -- configuration rows which have cleanup=TRUE.
    dbms_rcvcat.resetConfig2(TRUE, high_rm_recid);  -- wipe out nodespecific
    if cf_type in ('CURRENT', 'CREATED') then
      dbms_rcvcat.resetConfig2(FALSE);              -- wipe out generic
    end if;
    -- Now, resync records from the control file to recovery catalog.
    for confrec in conf_c loop
      deb('resync', 'Resyncing configuration cf_type '|| cf_type, dbtype);
      deb('resync', 'Resyncing configuration '|| confrec.conf#, dbtype);
      deb('resync', ' Name: ' || confrec.name || ' Value: '||
          confrec.value, dbtype);
      -- Only for current/created controlfile, resync generic configurations
      if krmicd.isNodeSpecific(confrec.name, confrec.value) then
        dbms_rcvcat.setConfig2(confrec.conf#, confrec.name, confrec.value,
                               TRUE);               -- node specific
      elsif cf_type in ('CURRENT', 'CREATED') then
        dbms_rcvcat.setConfig2(confrec.conf#, confrec.name, confrec.value,
                               FALSE);              -- generic
        -- if we are storing a "db_unique_name" configuration entry
        -- check whether we need to add it to the node table.
        -- if the db_unique_name doesn't exist in the node table,
        -- add it; otherwise, nothing to do.
        if confrec.name = 'DB_UNIQUE_NAME' then
          dbms_rcvcat.resyncAddDBUname(confrec.value);
        end if;
      end if;
    end loop;
  end resyncConf2Catalog;

  -- The following procedure resyncs all rman configuration records in
  -- the recovery catalog into controlfile.
  procedure resyncConf2ControlFile(for_dbuname in varchar2,
                                   source_cs   in varchar2) IS
    first       boolean;
    conf_name   varchar2(65);
    conf_value  varchar2(1025);
    recid       number;
    null_retVal varchar2(1);
  begin
    -- Delete all conf records in local/remote control file.
    if for_dbuname is not null then
      null_retVal := sys.dbms_backup_restore.remoteSQLExecute(
          source_dbuname => for_dbuname,
          source_cs      => source_cs,
          stmt => 'begin ' ||
                  ' sys.dbms_backup_restore.resetConfig;' ||
                  'end;');
    else
      sys.dbms_backup_restore.resetConfig;
    end if;

    first := TRUE;
    deb('resync', 'Pushing configuration to controlfile');
    -- Loop thru all configurations in the recovery catalog and set them.
    loop
      begin
        conf_name  := NULL;
        conf_value := NULL;
        dbms_rcvcat.getConfig(recid, conf_name, conf_value, first);
        if for_dbuname is not null then
          null_retVal := sys.dbms_backup_restore.remoteSQLExecute(
              source_dbuname => for_dbuname,
              source_cs      => source_cs,
              stmt => 'declare ' ||
                      ' recid number; ' ||
                      'begin ' ||
                      ' recid := sys.dbms_backup_restore.setConfig (' ||
                      '''' || replace(conf_name,'''','''''') || ''',''' ||
                      replace(conf_value,'''','''''') || '''); '||
                      'end;');
        else
          recid := sys.dbms_backup_restore.setConfig (conf_name, conf_value);
        end if;
        deb('resync', 'Pushing conf name=' || conf_name ||
            ';value=' || conf_value);
        first := FALSE;
      exception
        when no_data_found then
          krmicd.clearErrors;
          exit;
      end;
    end loop;
  end resyncConf2ControlFile;

  function getFlashbackTime(cftype varchar2) return date is
    flashback_on        number;
    fb_retention_target number;
    fb_until_time       date := to_date(null);
    recovery_time       date;
  begin
    -- is flashback on?
    select decode(flashback_on, 'YES', 1, 0) into flashback_on
      from v$database;
    if (flashback_on = 0) then
      return to_date(null);            -- flashback off
    end if;

    -- is db_flashback_retention_target parameter set?
    select di2fbret into fb_retention_target from x$kccdi2;
    if (nvl(fb_retention_target, 0) = 0) then
      return to_date(null);            -- no retention target set
    end if;
    deb('resync', 'fb_retention_target= ' || to_char(fb_retention_target),
        dbtype);

    -- Get an optimistic value of fb time from the flashback log if
    -- retention target is greater than 2 weeks.
    if (fb_retention_target > 14 * 60 * 24) then
      select min(to_date(fleltim, 'MM DD YYYY HH24:MI:SS',
                         'NLS_CALENDAR=Gregorian'))
        into fb_until_time
        from x$kccfle;
      deb('resync', 'fb_until_time= ' || nvl(to_char(fb_until_time), 'NULL'),
          dbtype);
      -- fleltim is not set on standby database. But, we have to set
      -- an upper limit as flashback time just to overcome incorrect
      -- db_flashback_retention_target.
      -- Let's have that as 2 weeks.
      fb_retention_target := 14 * 60 * 24;
    end if;

    -- There is no fb time, or the retention target is less than 2 weeks
    if (fb_until_time IS NULL) then
      -- Get incomplete recovery time only for standby database
      select decode(cftype, 'STANDBY',
                    nvl(to_date(di2irt, 'MM DD YYYY HH24:MI:SS',
                                'NLS_CALENDAR=Gregorian'), sysdate),
                    sysdate)
        into recovery_time
        from x$kccdi2;
      deb('resync', 'recovery_time= ' || to_char(recovery_time), dbtype);
      -- convert into timestamp from minutes.
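      --
      -- Worked example (illustration only): db_flashback_retention_target
      -- is in minutes, so a target of 1440 minutes subtracts
      -- 1440/(60 * 24) = 1 day from recovery_time; the 2-week cap above is
      -- 14 * 60 * 24 = 20160 minutes.
      --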
      fb_until_time := recovery_time - fb_retention_target/(60 * 24);
      deb('resync', 'fb_until_time= ' || nvl(to_char(fb_until_time), 'NULL'),
          dbtype);
    end if;
    return fb_until_time;
  exception
    when no_data_found then
      return to_date(null);
    when others then
      raise;
  end;

  -- check if 19871 event is set to level 1
  function is19871set return boolean is
    isset number;
  begin
    select count(*) into isset from v$parameter2
     where name = 'event'
       and lower(value) = '19871 trace name context forever, level 1';
    if isset > 0 then
      return true;
    else
      return false;
    end if;
  exception
    when others then
      return false;
  end;

  -- Find out the last time we resynced between high_stamp and min_stamp.
  -- The idea is to dynamically find out the last time resync happened by
  -- mining the catalog schema for duplicate (recid, stamp). If we find
  -- 16 consecutive duplicate records, then there is a very high probability
  -- that the timestamp associated with the record is closest to the last
  -- time we resynced, within an error factor of 1/unit day.
  --
  function wasresynced(until_stamp IN number
                      ,high_stamp  IN number) return number is
    nodups      number;              -- number of duplicates
    high        number;
    low         number;
    resyncstamp number;
  begin
    high := high_stamp;
    low  := until_stamp;
    nodups := 0;
    resyncstamp := 0;
    deb('resync', 'wasresynced high_stamp=' || high_stamp ||
        ' high_date=' || stamp2date(high_stamp), dbtype);
    for duprec in duprec_c(low, high) loop
      if (dbms_rcvcat.isDuplicateRecord(recid => duprec.recid
                                       ,stamp => duprec.stamp
                                       ,type  => duprec.type)) then
        if (resyncstamp = 0) then
          resyncstamp := duprec.stamp;
        end if;
        nodups := nodups + 1;
        if (nodups >= maxdups) then
          deb('resync', 'wasresynced resyncstamp=' || resyncstamp ||
              ' resyncdate=' || stamp2date(resyncstamp), dbtype);
          return resyncstamp;
        end if;
      else
        -- couldn't find 16 consecutive duplicate records.
        deb('resync', 'wasresynced could not find record recid=' ||
            duprec.recid || ' stamp=' || duprec.stamp ||
            ' type=' || duprec.type || ' maxdups=' || nodups, dbtype);
        return 0;
      end if;
    end loop;
    -- Timestamp range not enough to satisfy the number of duplicates.
    -- Retry using a higher timestamp
    deb('resync', 'timestamp range not enough - nodups=' || nodups, dbtype);
    return -1;
  end;

  procedure resyncTempfiles is
  begin
    select record_size, last_recid into rec_size, high_tf_recid
      from v$controlfile_record_section
     where type = 'TEMPORARY FILENAME';
    deb('resync', 'high_tf_recid= '|| high_tf_recid, dbtype);
    if dbms_rcvcat.beginTempFileResync(high_tf_recid) THEN
      rec_size := 1024 + rec_size;     -- kccfn + kcctf
      rec_per_chunk := floor(sort_area_size / rec_size);
      -- find the high recid
      select max(tfnum) into high_recno from x$kcctf;
      found := 0;
      low := 1;
      deb_sort_area(0);
      loop
        high := low + rec_per_chunk - 1;
        for tfrec in tf(low, high) LOOP
          deb('resync', 'Resyncing tempfile '|| tfrec.fname, dbtype);
          IF tfrec.fsize = 0 THEN
            -- We don't have the filesize, so pass NULL so that we
            -- don't update the value in the catalog.
            tfrec.fsize := NULL;
          END IF;
          deb('resync', 'Calling checkTempFile for fileno '||
              tfrec.fileno||' size '||tfrec.fsize, dbtype);
          dbms_rcvcat.checkTempFile(
              tfrec.fileno, tfrec.fname, tfrec.create_scn,
              tfrec.create_time, tfrec.fsize, tfrec.block_size,
              tfrec.tsnum, tfrec.rfileno, tfrec.autoextend,
              tfrec.max_size, tfrec.next_size);
          found := found + 1;
        end loop;
        IF (high >= high_recno) THEN
          deb('resync', 'Processed '||found|| ' tempfiles. Done', dbtype);
          goto all_tf_found;
        ELSE
          low := high + 1;
        END IF;
      end loop;
      <<all_tf_found>>
      dbms_rcvcat.endTempFileResync;
    end if;
  end;

begin
  entered('resync');
  debug_resync := FALSE;
  &resync_flag&

  <>
  -- come back here for automatic resync from primary
  if auto_prim_resync and sync_retries = 1 then
    select primary_db_unique_name into for_dbuname from v$database;
  end if;

  sort_area_size := set_sort_area_size(null);
  if sort_area_size < sort_area_size_init then
    sort_area_size := set_sort_area_size(sort_area_size_init);
  end if;

  deb('resync', 'Starting resync', dbtype);

  -- Write a message indicating resync from remote database is being done
  if for_dbuname is not null then
    krmicd.writeMsg(6615, for_dbuname);
  end if;

  deb('resync', 'cfname is '|| cfname, dbtype);
  if cfname is null then
    select status into mount_status from v$instance;
    if mount_status not in ('MOUNTED', 'OPEN') then
      if full_resync then
        krmicd.writeMsg(8034);
      else
        krmicd.writeMsg(8035);
      end if;
      return;
    end if;
  else
    mount_status := 'BACKUP';          -- resync from backup cf
  end if;

  -- limitation of resync catalog from db_unique_name.. RMAN cannot do
  -- resync of v$rman_output when resyncing from a db_unique_name via
  -- cfileMakeAndUseSnapshot.
  if for_dbuname is null then
    select startup_time into inst_startup_time from v$instance;
    inst_startup_stamp := date2stamp(inst_startup_time);
  end if;

  <<snapshot>>
  -- retry on makeAndUseSnapshot failure
  begin
    -- use a snapshot controlfile for full resync unless a backup
    -- controlfile is specified
    if (full_resync) then
      deb('resync', 'full_resync value is true', dbtype);
    else
      deb('resync', 'full_resync value is false', dbtype);
    end if;
    if (full_resync or for_dbuname is not null) then
      if (cfname is null) then
        -- select cf_type before making snapshot because cf_type is always
        -- 'BACKUP' after snapshot.
        select controlfile_type, dbid into cf_type, db_id from v$database;
        -- bug-10581508: We don't check if there is a snapshot
        -- filename in x$kccdi.
        -- In case that x$kccdi has no snapshot filename,
        -- the Oracle Server will use the default location.
        -- (see kccmus() and cfileMakeAndUseSnapshot in file kcc.c)
        -- make a snapshot of the current controlfile
        if for_dbuname is null then
          sys.dbms_backup_restore.cfileMakeAndUseSnapshot(
              isstby         => FALSE,
              source_dbuname => NULL,
              source_cs      => NULL,
              dest_cs        => NULL,
              for_resync     => TRUE);
        else
          -- If we are to resync from the other site, pass the parameters
          -- to cfileMakeAndUseSnapshot.
          deb('resync', 'creating remote cf snapshot for '|| for_dbuname,
              dbtype);
          deb('resync', 'source connid '|| source_cs, dbtype);
          deb('resync', 'dest connid '|| dest_cs, dbtype);
          if source_cs is null then
            source_cs := sys.dbms_backup_restore.get_connect_identifier
                             (dbuname => for_dbuname);
            if source_cs is null then
              krmicd.SignalErrMsg(6613, for_dbuname);
              return;
            end if;
            deb('resync', 'got source connid '|| source_cs, dbtype);
          end if;
          if dest_cs is null then
            select db_unique_name into ldb_unique_name from v$database;
            -- the first stmt ensures that the package is loaded in memory
            -- for the subsequent SELECT stmt.
            null_retVal := sys.dbms_backup_restore.remoteSQLExecute(
                source_dbuname => for_dbuname,
                source_cs      => source_cs,
                stmt => 'declare ' ||
                        ' n varchar2(200);' ||
                        'begin ' ||
                        ' n := sys.dbms_backup_restore.getCkptSCN;' ||
                        'end;');
            dest_cs := sys.dbms_backup_restore.remoteSQLExecute(
                source_dbuname => for_dbuname,
                source_cs      => source_cs,
                stmt => 'select ' ||
                        ' sys.dbms_backup_restore.get_connect_identifier('||
                        ' dbuname=>''' || ldb_unique_name || ''')' ||
                        ' from dual');
            if dest_cs is null then
              krmicd.SignalErrMsg(6613, ldb_unique_name);
              return;
            end if;
            deb('resync', 'got dest connid '|| dest_cs, dbtype);
          end if;
          sys.dbms_backup_restore.cfileMakeAndUseSnapshot(
              isstby         => FALSE,
              source_dbuname => for_dbuname,
              source_cs      => source_cs,
              dest_cs        => dest_cs,
              for_resync     => TRUE);

          -- now get all information from control file belonging to
          -- for_dbuname DB_UNIQUE_NAME database node.
          select dbid, name, dbinc.resetlogs_change#, dbinc.resetlogs_time,
                 controlfile_type, upper(db_unique_name)
            into for_db_id, db_name, reset_scn, reset_time, cf_type,
                 ldb_unique_name
            from v$database db, v$database_incarnation dbinc
           where dbinc.status = 'CURRENT';

          -- make sure the remote database has the same DBID as the
          -- target database
          if db_id <> for_db_id then
            raise db_id_mismatch;
          end if;

          -- make sure the db_unique_name in the control file matches the
          -- user specified database site. Otherwise, it is an error.
          -- It is possible that the remote database sent the control file
          -- to some other RAC node of this database, and the snapshot
          -- location is not shared across nodes. It can happen when
          -- the service names in connect identifiers are using Virtual IPs.
          -- Hence, just signal a user error.
          if lower(ldb_unique_name) <> lower(for_dbuname) then
            krmicd.writeMsg(6538, for_dbuname, ldb_unique_name);
            raise dbuname_mismatch;
          end if;

          mount_status := sys.dbms_backup_restore.remoteSQLExecute(
              source_dbuname => for_dbuname,
              source_cs      => source_cs,
              stmt           => 'select status from v$instance');
          deb('resync', 'remote site mount_status=' || mount_status, dbtype);

          -- if the remote control file is a backup, it must have been
          -- created from the primary and we need to perform full resync...
          if cf_type = 'BACKUP' then
            deb('resync', 'remote site is primary', dbtype);
            cf_type := 'CURRENT';
            -- the value of binary cf_type should be same as krmkctCURRENT
            -- or CF_CURRENT
            ub4_cf_type := 1;
          else
            deb('resync', 'remote site is standby', dbtype);
            -- the value of binary cf_type should be same as krmkctSTANDBY
            -- or CF_STANDBY
            ub4_cf_type := 4;
          end if;

          -- call setDatabase to set the control file attributes of the
          -- remote db_unique_name site.
          begin
            dbms_rcvcat.setDatabase(db_name        => db_name,
                                    reset_scn      => reset_scn,
                                    reset_time     => reset_time,
                                    db_id          => db_id,
                                    db_unique_name => for_dbuname,
                                    dummy_instance => FALSE,
                                    cf_type        => ub4_cf_type,
                                    site_aware     => TRUE);
            -- clear set database bit so that when resync is done,
            -- the next command resets the database to local controlfile
            -- information.
            krmicd.checksetDatabase;
          exception
            -- If incarnation not known, register new incarnation
            -- and make it current in order to perform resync... TODO.
            when others then
              raise;
          end;
        end if;
      else
        -- use the backup controlfile
        sys.dbms_backup_restore.cfileUseCopy(cfname);
      end if;
      releasecf := TRUE;
    end if;
  exception
    when sys.dbms_backup_restore.snapshot_enqueue_busy then
      -- retry up to 180 times, waiting 20 seconds between retries,
      -- thus attempting to get a snapshot control file for an hour.
      -- Since we are displaying the RMAN-08512 message, the user will
      -- know that RMAN is waiting to get the snapshot enqueue.
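      --
      -- Worked example (illustration only): 180 retries at 20 seconds each
      -- is 180 * 20 = 3600 seconds (one hour), and mod(busy_retries, 15) = 0
      -- below surfaces the message every 15 * 20 = 300 seconds (5 minutes).
      --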
   exception
      when sys.dbms_backup_restore.snapshot_enqueue_busy then
         -- retry up to 180 times, waiting 20 seconds between retries,
         -- thus attempting to get a snapshot control file for an hour.
         -- Since we are displaying the RMAN-08512 message, the user will
         -- know that RMAN is waiting to get the snapshot enqueue.
         if busy_retries = 180 then
            krmicd.writeMsg(20029, 'cannot make a snapshot controlfile');
            raise;
         end if;
         busy_retries := busy_retries + 1;
         -- print this message every 5 minutes
         if (mod(busy_retries, 15) = 0) then
            krmicd.writeMsg(8512);
         end if;
         krmicd.sleep(20);
         krmicd.clearErrors;
         goto snapshot;
   end;   -- snapshot controlfile stuff

   -- Here, the controlfile X$ tables are pointing at either
   -- the snapshot controlfile, the current controlfile, or
   -- a backup controlfile.

   if (cf_type is null) then
      -- cf_type not filled in yet
      select controlfile_type into cf_type from v$database;
      deb('resync', 'set cf_type to '|| cf_type);
   end if;

   if cf_type = 'STANDBY' then
      -- The user probably said: resync from backup controlfile.
      -- We only want to do partial resyncs from a standby controlfile.
      -- The datafile filenames in a standby cf are probably different from
      -- those in the primary cf, so we don't want to process them.
      deb('resync', 'Resyncing from a standby controlfile', dbtype);
      full_resync := FALSE;
   elsif cf_type = 'CURRENT' then
      deb('resync', 'Resyncing from a current controlfile', dbtype);
   elsif cf_type = 'BACKUP' then
      deb('resync', 'Resyncing from a backup controlfile', dbtype);
      full_resync := FALSE;
   else
      if full_resync then
         krmicd.writeMsg(8040);
      else
         krmicd.writeMsg(8041);
      end if;
      return;
   end if;

<<retry>>            -- retry on inconsistent read or sort_area_size overflow
   begin
      -- beginCkpt must be called only the first time.
      -- We do not release the dbinc lock during retries.  This avoids
      -- another session waiting on resync getting through and causing
      -- this session to fail with an invalid controlfile sequence# error.
      -- During retries we already know what type of resync must be done.
      if (sort_retries = 0 AND read_retries = 0) then
         select dbid, name, dbinc.resetlogs_change#,
                dbinc.prior_resetlogs_change#, dbinc.resetlogs_time,
                dbinc.prior_resetlogs_time, controlfile_created,
                controlfile_sequence#, controlfile_change#,
                controlfile_time, version_time
           into db_id, db_name, reset_scn, prior_reset_scn, reset_time,
                prior_reset_time, cf_create_time, ckp_cf_seq, ckp_scn,
                ckp_time, cf_version
           from v$database db, v$database_incarnation dbinc
          where dbinc.incarnation# = db.recovery_target_incarnation#;

         -- must get db_unique_name from the control file only.
         select db_unique_name into ldb_unique_name from v$database;

         kccdivts := date2stamp(cf_version); -- used by circular record queries
         deb('resync', 'kccdivts= '||to_char(kccdivts), dbtype);

         if for_dbuname is not null then
            -- the first stmt ensures that the package is loaded in memory
            -- for the subsequent SELECT stmt.
null_retVal := sys.dbms_backup_restore.remoteSQLExecute( source_dbuname => for_dbuname, source_cs => source_cs, stmt => 'declare ' || ' n varchar2(200);' || 'begin ' || ' n := sys.dbms_backup_restore.getCkptSCN;' || 'end;'); getckptscn := to_number( sys.dbms_backup_restore.remoteSQLExecute( source_dbuname => for_dbuname, source_cs => source_cs, stmt => 'select sys.dbms_backup_restore.getCkptSCN from dual')); else getckptscn := sys.dbms_backup_restore.getCkptSCN; end if; -- get high water marks for record types from snapshot control file -- by doing SELECT query select last_recid into high_cp_recid from v$controlfile_record_section where type = 'CKPT PROGRESS'; select last_recid into high_rt_recid from v$controlfile_record_section where type = 'REDO THREAD'; select last_recid into high_le_recid from v$controlfile_record_section where type = 'REDO LOG'; select last_recid into high_fe_recid from v$controlfile_record_section where type = 'DATAFILE'; select last_recid into high_fn_recid from v$controlfile_record_section where type = 'FILENAME'; select last_recid into high_ts_recid from v$controlfile_record_section where type = 'TABLESPACE'; select last_recid into high_r1_recid from v$controlfile_record_section where type = 'TEMPORARY FILENAME'; select last_recid into high_rm_recid from v$controlfile_record_section where type = 'RMAN CONFIGURATION'; select last_recid into high_lh_recid from v$controlfile_record_section where type = 'LOG HISTORY'; select last_recid into high_or_recid from v$controlfile_record_section where type = 'OFFLINE RANGE'; select last_recid into high_al_recid from v$controlfile_record_section where type = 'ARCHIVED LOG'; select last_recid into high_bs_recid from v$controlfile_record_section where type = 'BACKUP SET'; select last_recid into high_bp_recid from v$controlfile_record_section where type = 'BACKUP PIECE'; select last_recid into high_bf_recid from v$controlfile_record_section where type = 'BACKUP DATAFILE'; select last_recid into high_bl_recid from v$controlfile_record_section where type = 'BACKUP REDOLOG'; select last_recid into high_dc_recid from v$controlfile_record_section where type = 'DATAFILE COPY'; select last_recid into high_fc_recid from v$controlfile_record_section where type = 'BACKUP CORRUPTION'; select last_recid into high_cc_recid from v$controlfile_record_section where type = 'COPY CORRUPTION'; select last_recid into high_dl_recid from v$controlfile_record_section where type = 'DELETED OBJECT'; select last_recid into high_pc_recid from v$controlfile_record_section where type = 'PROXY COPY'; select last_recid into high_bi_recid from v$controlfile_record_section where type = 'BACKUP SPFILE'; if debug_resync then deb('resync', 'high_cp_recid= '||high_cp_recid, dbtype); deb('resync', 'high_rt_recid= '||high_rt_recid, dbtype); deb('resync', 'high_le_recid= '||high_le_recid, dbtype); deb('resync', 'high_fe_recid= '||high_fe_recid, dbtype); deb('resync', 'high_fn_recid= '||high_fn_recid, dbtype); deb('resync', 'high_ts_recid= '||high_ts_recid, dbtype); deb('resync', 'high_r1_recid= '||high_r1_recid, dbtype); deb('resync', 'high_rm_recid= '||high_rm_recid, dbtype); deb('resync', 'high_lh_recid= '||high_lh_recid, dbtype); deb('resync', 'high_or_recid= '||high_or_recid, dbtype); deb('resync', 'high_al_recid= '||high_al_recid, dbtype); deb('resync', 'high_bs_recid= '||high_bs_recid, dbtype); deb('resync', 'high_bp_recid= '||high_bp_recid, dbtype); deb('resync', 'high_bf_recid= '||high_bf_recid, dbtype); deb('resync', 'high_bl_recid= '||high_bl_recid, dbtype); 
deb('resync', 'high_dc_recid= '||high_dc_recid, dbtype); deb('resync', 'high_fc_recid= '||high_fc_recid, dbtype); deb('resync', 'high_cc_recid= '||high_cc_recid, dbtype); deb('resync', 'high_dl_recid= '||high_dl_recid, dbtype); deb('resync', 'high_pc_recid= '||high_pc_recid, dbtype); deb('resync', 'high_bi_recid= '||high_bi_recid, dbtype); end if; if (not full_resync) then ckp_scn := getckptscn; ckp_time := NULL; end if; begin if (full_resync or auto_prim_resync) then dbms_rcvcat.beginCkpt (ckp_scn, ckp_cf_seq, cf_version, ckp_time, 'FULL', mount_status, high_fe_recid, cf_type); else dbms_rcvcat.beginCkpt (ckp_scn, ckp_cf_seq, cf_version, ckp_time, 'PARTIAL', mount_status, high_fe_recid, cf_type); end if; exception when resync_not_needed then if implicit then krmicd.clearErrors; return; else raise; end if; when others then raise; end; if (full_resync) then if implicit then resync_reason := dbms_rcvcat.getReason; if resync_reason = dbms_rcvcat.RESYNC_REASON_NOACTION then fullResyncBaseMsg := 8002; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_TS then fullResyncBaseMsg := 8200; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_DF then fullResyncBaseMsg := 8205; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_TF then fullResyncBaseMsg := 8210; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_THR then fullResyncBaseMsg := 8215; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_ORL then fullResyncBaseMsg := 8220; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_RESET then fullResyncBaseMsg := 8224; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_CONF then fullResyncBaseMsg := 8225; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_RSL then fullResyncBaseMsg := 8226; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_CF then fullResyncBaseMsg := 8227; elsif resync_reason = dbms_rcvcat.RESYNC_REASON_INC then fullResyncBaseMsg := 8228; else fullResyncBaseMsg := 8229; end if; if not auto_prim_resync then krmicd.writeMsg(fullResyncBaseMsg); end if; else if not auto_prim_resync then krmicd.writeMsg(8002); -- full resync end if; end if; else if not implicit and not auto_prim_resync then krmicd.writeMsg(1005, 'starting partial resync of recovery catalog'); end if; end if; end if; if (full_resync or auto_prim_resync) then select last_recid into high_tf_recid from v$controlfile_record_section where type = 'TEMPORARY FILENAME'; -- Because we cannot detect if rollback segments have been created or -- dropped since the previous full open database resync, we always -- resync tablespace information when the database is open. -- -- Because the same high_ts_recid is used to track temporary and -- permanent tablespace, we cannot make sure if we resynced the -- temporary tablespace which was introduced in 10gR2. So, force -- tablespace resync whenever there is a new tempfile found. 
-- if (mount_status = 'OPEN' OR dbms_rcvcat.tempFileToResync(high_tf_recid)) then force := TRUE; -- force tablespace resync deb('resync', 'Forcing tablespace resync, for '|| nvl(high_ts_recid, 0)||' tablespaces', dbtype); else force := FALSE; deb('resync', 'Checking if tablespace resync is needed, have '|| nvl(high_ts_recid, 0)||' tablespaces', dbtype); end if; krmicd.clearErrors; if dbms_rcvcat.beginTableSpaceResync(high_ts_recid, force) then for tsrec in ts loop if (mount_status = 'OPEN') then begin if for_dbuname is not null then rbs_count := to_number( sys.dbms_backup_restore.remoteSQLExecute( source_dbuname => for_dbuname, source_cs => source_cs, stmt => 'select count(us#) from undo$ u ' || ' where u.ts# = ' || tsrec.ts# || ' and u.status$ != 1')); else select count(us#) into rbs_count from undo$ u where u.ts# = tsrec.ts# and u.status$ != 1; end if; exception when others then err_msg := sqlerrm; krmicd.writeErrMsg(4005, err_msg); rbs_count := 0; raise; end; end if; deb('resync', 'Calling checkTableSpace', dbtype); deb('resync', ' for ts: '||tsrec.name||' ('||to_char(tsrec.ts#)|| '), cscn: '||to_char(tsrec.create_scn)|| ', plugin_scn: '||to_char(tsrec.plugin_scn)||' with '|| rbs_count||' rollback segments', dbtype); dbms_rcvcat.checkTableSpace (tsrec.name, tsrec.ts#, tsrec.create_scn, tsrec.create_time, rbs_count, tsrec.included_in_database_backup, tsrec.bigfile, tsrec.temporary, tsrec.encrypt_in_backup, tsrec.plugin_scn); end loop; deb('resync', 'Calling endTableSpaceResync', dbtype); dbms_rcvcat.endTableSpaceResync; end if; IF dbms_rcvcat.beginDataFileResync(high_fe_recid) THEN select record_size into rec_size from v$controlfile_record_section where type='DATAFILE'; rec_size := 1024 + rec_size; -- kccfn + kccfe rec_per_chunk := floor(sort_area_size / rec_size); -- total number of records select dindf into total_recs from x$kccdi; deb('resync', 'total files= '|| total_recs, dbtype); found := 0; low := 1; deb_sort_area(0); loop high := low + rec_per_chunk - 1; for dfrec in df(low, high) LOOP deb('resync', 'Resyncing datafile '|| dfrec.fname, dbtype); IF (dfrec.plugged_readonly = 'NO' OR dfrec.plugin_scn != 0) THEN IF (dfrec.clean_flag <> 0 OR -- the clean bit is on dfrec.plugged_readonly = 'YES') THEN IF dfrec.read_enabled_flag <> 0 THEN read_only := 1; ELSE read_only := 0; -- offline clean END IF; ELSE -- the file is not clean dfrec.stop_scn := NULL; dfrec.stop_time := NULL; read_only := 0; END IF; IF dfrec.fsize = 0 THEN -- We don't have the filesize, so pass NULL so that we -- don't update the value in the catalog. dfrec.fsize := NULL; END IF; IF dfrec.missing_file_flag <> 0 THEN -- The file is missing, so the controlfile does not -- have the real filename. Pass NULL to checkDatafile so -- that it does not change the filename in the catalog. dfrec.fname := NULL; END IF; deb('resync', 'Calling checkDataFile for fileno '|| dfrec.fileno||' size '||dfrec.fsize, dbtype); dbms_rcvcat.checkDataFile( dfrec.fileno, dfrec.fname, dfrec.create_scn, dfrec.create_time, dfrec.fsize, dfrec.block_size, dfrec.tsnum, dfrec.stop_scn, read_only, dfrec.stop_time, dfrec.rfileno, dfrec.aux_fname, dfrec.foreign_dbid, dfrec.foreign_create_scn, dfrec.foreign_create_time, dfrec.plugged_readonly, dfrec.plugin_scn, dfrec.plugin_reset_scn, dfrec.plugin_reset_time, dfrec.create_thread, dfrec.create_size); -- In case that target database has been just upgraded then -- aux_fname is unknown and we have to set it based on data -- stored in recovery catalog. 
               IF (dfrec.aux_fname = 'UNKNOWN') THEN
                  IF debug_resync THEN
                     deb('resync', 'Calling getCloneName', dbtype);
                  END IF;
                  rc_aux_fname := dbms_rcvcat.getCloneName(dfrec.fileno,
                                     dfrec.create_scn, dfrec.plugin_scn);
                  IF for_dbuname is not null THEN
                     null_retVal := sys.dbms_backup_restore.remoteSQLExecute(
                        source_dbuname => for_dbuname,
                        source_cs      => source_cs,
                        stmt => 'begin ' ||
                                ' sys.dbms_backup_restore.setDatafileAux(' ||
                                dfrec.fileno || ',''' ||
                                replace(rc_aux_fname,'''','''''') ||
                                '''); '||
                                'end;');
                  ELSE
                     sys.dbms_backup_restore.setDatafileAux(dfrec.fileno,
                                                            rc_aux_fname);
                  END IF;
               END IF;
               IF dfrec.offline_scn <> 0 then
                  deb('resync', 'Calling checkOfflineRange: offline '||
                      dfrec.offline_scn||' online '|| dfrec.online_scn,
                      dbtype);
                  dbms_rcvcat.checkOfflineRange(
                     null, null, dfrec.fileno, dfrec.create_scn,
                     dfrec.offline_scn, dfrec.online_scn,
                     dfrec.online_time, cf_create_time);
               END IF;
            END IF;
            found := found + 1;
            IF (found = total_recs) THEN
               deb('resync', 'Processed '||found|| ' datafiles. Done',
                   dbtype);
               goto all_df_found;
            END IF;
         end loop;
         low := high + 1;
      end loop;
   <<all_df_found>>
      dbms_rcvcat.endDataFileResync;
   end if;

   resyncTempfiles;

   deb('resync', 'high_rt_recid= '|| high_rt_recid, dbtype);
   if dbms_rcvcat.beginThreadResync(high_rt_recid) then
      for rtrec in rt loop
         deb('resync', 'Calling checkThread for thread '||
             rtrec.thread#||' with sequence '|| rtrec.last_sequence#,
             dbtype);
         dbms_rcvcat.checkThread (rtrec.thread#, rtrec.last_sequence#,
            rtrec.enable_scn, rtrec.enable_time, rtrec.disable_scn,
            rtrec.disable_time, rtrec.status);
      end loop;
      dbms_rcvcat.endThreadResync;
   end if;

   deb('resync', 'high_le_recid= '|| high_le_recid, dbtype);
   if dbms_rcvcat.beginOnlineRedoLogResync(high_le_recid) then
      for orlrec in orl loop
         deb('resync','Calling checkOnlineRedoLog '|| orlrec.fname, dbtype);
         dbms_rcvcat.checkOnlineRedoLog (orlrec.thread#, orlrec.group#,
            orlrec.fname, orlrec.bytes, orlrec.type);
      end loop;
      dbms_rcvcat.endOnlineRedoLogResync;
   end if;

   if resync_reason != dbms_rcvcat.RESYNC_REASON_NOACTION then
      deb('resync', 'Checking for fullResyncAction', dbtype);
      dbms_rcvcat.getResyncActions(resync_valid, resync_added,
         resync_dropped, resync_changed, resync_recreated,
         resync_renamed, resync_resized);
      deb('resync', 'fullResyncAction.added: '||nvl(resync_added, -1),
          dbtype);
      deb('resync', 'fullResyncAction.dropped: '||nvl(resync_dropped, -1),
          dbtype);
      deb('resync', 'fullResyncAction.changed: '||nvl(resync_changed, -1),
          dbtype);
      deb('resync', 'fullResyncAction.recreated: '||
          nvl(resync_recreated, -1), dbtype);
      deb('resync', 'fullResyncAction.renamed: '||nvl(resync_renamed, -1),
          dbtype);
      deb('resync', 'fullResyncAction.resized: '||nvl(resync_resized, -1),
          dbtype);
      if resync_valid then
         deb('resync', 'Got a valid fullResyncAction', dbtype);
         krmicd.msgResyncActions(fullResyncBaseMsg, resync_added,
            resync_dropped, resync_changed, resync_recreated,
            resync_renamed, resync_resized);
      end if;
   end if;
   dbms_rcvcat.setReason(dbms_rcvcat.RESYNC_REASON_NONE, TRUE);
end if;   -- full_resync or auto_prim_resync

IF cf_type = 'STANDBY' AND not converted_cf THEN
   IF dbms_rcvcat.beginOnlineRedoLogResync(high_le_recid) THEN
      for orlrec in orl loop
         deb('resync','Calling checkOnlineRedoLog '|| orlrec.fname, dbtype);
         dbms_rcvcat.checkOnlineRedoLog (orlrec.thread#, orlrec.group#,
            orlrec.fname, orlrec.bytes, orlrec.type);
      end loop;
      dbms_rcvcat.endOnlineRedoLogResync;
   END IF;

   IF dbms_rcvcat.beginDataFileResyncForStandby(high_fe_recid) THEN
      rec_size := 758 + 75;
      rec_per_chunk := floor(sort_area_size / rec_size);
      -- total number of records
      select dindf into total_recs from x$kccdi;
      found := 0;
      low := 1;
      deb_sort_area(0);
      loop
         high := low + rec_per_chunk - 1;
         for dfrec in df(low, high) LOOP
            deb('resync', 'Resyncing datafile for standby '|| dfrec.fname);
            IF (dfrec.plugged_readonly = 'NO' OR
                dfrec.plugin_scn != 0) THEN
               IF (dfrec.clean_flag <> 0 OR    -- the clean bit is on
                   dfrec.plugged_readonly = 'YES') THEN
                  IF dfrec.read_enabled_flag <> 0 THEN
                     read_only := 1;
                  ELSE
                     read_only := 0;           -- offline clean
                  END IF;
               ELSE                            -- the file is not clean
                  dfrec.stop_scn := NULL;
                  dfrec.stop_time := NULL;
                  read_only := 0;
               END IF;
               IF dfrec.fsize = 0 THEN
                  -- We don't have the filesize, so pass NULL so that we
                  -- don't update the value in the catalog.
                  dfrec.fsize := NULL;
               END IF;
               IF dfrec.missing_file_flag <> 0 THEN
                  -- The file is missing, so the controlfile does not
                  -- have the real filename. Pass NULL to checkDatafile so
                  -- that it does not change the filename in the catalog.
                  dfrec.fname := NULL;
               END IF;
               deb('resync', 'Calling checkDataFileForStandby for fileno '||
                   dfrec.fileno||' size '||dfrec.fsize);
               dbms_rcvcat.checkDataFileForStandby(
                  dfrec.fileno, dfrec.fname, dfrec.create_scn,
                  dfrec.create_time, dfrec.fsize, dfrec.block_size,
                  dfrec.tsnum, dfrec.rfileno, dfrec.stop_scn, read_only,
                  dfrec.foreign_dbid, dfrec.plugin_scn);
            END IF;
            found := found + 1;
            IF (found = total_recs) THEN
               deb('resync', 'Processed '||found|| ' datafiles. Done');
               goto all_df_found_for_standby;
            END IF;
         end loop;
         low := high + 1;
      end loop;
   <<all_df_found_for_standby>>
      dbms_rcvcat.endDataFileResyncForStandby;
   end if;

   select record_size, last_recid into rec_size, high_tf_recid
     from v$controlfile_record_section
    where type = 'TEMPORARY FILENAME';
   if dbms_rcvcat.beginTempFileResyncForStandby(high_tf_recid) THEN
      rec_size := 1024 + rec_size;   -- kccfn + kcctf
      rec_per_chunk := floor(sort_area_size / rec_size);
      -- find the high recid
      select max(tfnum) into high_recno from x$kcctf;
      found := 0;
      low := 1;
      deb_sort_area(0);
      loop
         high := low + rec_per_chunk - 1;
         for tfrec in tf(low, high) LOOP
            deb('resync', 'Resyncing tempfile '|| tfrec.fname, dbtype);
            IF tfrec.fsize = 0 THEN
               -- We don't have the filesize, so pass NULL so that we
               -- don't update the value in the catalog.
               tfrec.fsize := NULL;
            END IF;
            deb('resync', 'Calling checkTempFile for fileno '||
                tfrec.fileno||' size '||tfrec.fsize, dbtype);
            dbms_rcvcat.checkTempFileForStandby(
               tfrec.fileno, tfrec.fname, tfrec.create_scn,
               tfrec.create_time, tfrec.fsize, tfrec.block_size,
               tfrec.tsnum, tfrec.rfileno, tfrec.autoextend,
               tfrec.max_size, tfrec.next_size);
            found := found + 1;
         end loop;
         IF (high >= high_recno) THEN
            deb('resync', 'Processed '||found|| ' tempfiles. Done',
                dbtype);
            goto all_stby_tf_found;
         ELSE
            low := high + 1;
         END IF;
      end loop;
   <<all_stby_tf_found>>
      dbms_rcvcat.endTempFileResyncForStandby;
   end if;
END IF;
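--
-- Aside: the datafile/tempfile loops above page through fixed-size
-- controlfile records in chunks, sizing each chunk as
-- floor(sort_area_size / rec_size) so one chunk's worth of rows fits in
-- the session's sort area.  A commented-out, self-contained sketch of the
-- same chunked-scan pattern (the generated rowset and the constants stand
-- in for the controlfile X$ cursor and the real area/record sizes):
--
--    declare
--       cursor c (lo pls_integer, hi pls_integer) is
--          select recno
--            from (select level recno from dual connect by level <= 1000)
--           where recno between lo and hi;
--       rec_per_chunk  pls_integer := 64;    -- stands in for area/recsize
--       total_recs     pls_integer := 1000;  -- assumed known up front
--       found          pls_integer := 0;
--       low            pls_integer := 1;
--       high           pls_integer;
--    begin
--       loop
--          high := low + rec_per_chunk - 1;
--          for r in c(low, high) loop
--             found := found + 1;            -- per-record work goes here
--             if found = total_recs then
--                goto all_found;             -- stop once every record seen
--             end if;
--          end loop;
--          low := high + 1;
--       end loop;
--    <<all_found>>
--       dbms_output.put_line('processed '||found||' records');
--    end;
--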
if (not auto_prim_resync and dbms_rcvcat.doDuplicateMining) then
   --
   -- As high water marks aren't stored in a controlfile that is not
   -- current (i.e. backup/standby), we resync all records including
   -- duplicates starting from kccdivts.  This is a sheer waste of
   -- resources when there are a lot of duplicate records, and it makes
   -- resync take significant time.
   --
   -- To resolve this problem, we dynamically find out the last time
   -- resync happened by mining the catalog schema for duplicate
   -- (recid, stamp).  If we find 16 consecutive duplicate records, then
   -- there is a very high probability that the timestamp associated with
   -- the record is closest to the last time we resynced, with an error
   -- factor of 1/unit day.
   -- (where unit is the constant declared - see comments above)
   --
   -- To keep this simple, we just mine the AL, BP and DC tables.
   --
   deb('resync', 'Begin mining previous resyncstamp', dbtype);
   select max(stamp1), min(stamp2) into max_stamp, until_stamp
     from (select nvl(max(al.stamp), least_stamp) stamp1,
                  nvl(min(al.stamp), greatest_stamp) stamp2
             from v$archived_log al
            where standby_dest = 'NO'
              and status != 'D'
              and archived = 'YES'
              and al.stamp >= kccdivts
           union all
           select nvl(max(bp.stamp), least_stamp) stamp1,
                  nvl(min(bp.stamp), greatest_stamp) stamp2
             from v$backup_piece bp
            where status != 'D'
              and bp.stamp >= kccdivts
           union all
           select nvl(max(dc.stamp), least_stamp) stamp1,
                  nvl(min(dc.stamp), greatest_stamp) stamp2
             from v$datafile_copy dc
            where status != 'D'
              and dc.stamp >= kccdivts);
   deb('resync', '(until_stamp, max_stamp)=(' || until_stamp || ',' ||
       max_stamp || ')', dbtype);

   if (max_stamp >= until_stamp) then
      if is19871set then          -- testing binary search
         unit := 24 * 60 * 60;    -- second granularity
         maxdups := 2;            -- look for two duplicates
      end if;
      min_stamp := until_stamp;
<<retry_mining>>
      no_units := stamp2date(max_stamp) - stamp2date(min_stamp);
      deb('resync', 'number of days apart=' || no_units ||
          ' unit=' || unit, dbtype);
      no_units := ceil(no_units * unit);
      deb('resync', 'number of units apart=' || no_units, dbtype);
      high := no_units;
      low := 0;
      middle := 0;
      -- do a binary search to find the closest previous resyncstamp
      while (low <= high) loop
         middle := floor((low + high) / 2);
         deb('resync', 'high=' || high || ' low=' || low, dbtype);
         high_stamp := date2stamp(stamp2date(min_stamp) + middle/unit);
         wasresyncstamp := wasresynced(until_stamp, high_stamp);
         if (wasresyncstamp != 0) then
            resyncstamp := wasresyncstamp;
            low := middle + 1;
            if (wasresyncstamp = -1) then
               new_min_stamp := high_stamp;
            end if;
         else
            high := middle - 1;
         end if;
      end loop;
      -- if there are not enough duplicates, then change the unit to
      -- minutes, with the last known min_stamp that found some duplicates
      if (resyncstamp = -1 and unit < 1440 and new_min_stamp > 0) then
         deb('resync', 'retry using new min_stamp ' || new_min_stamp,
             dbtype);
         unit := 1440;            -- minute granularity
         min_stamp := new_min_stamp;
         goto retry_mining;
      end if;
      deb('resync', 'previous resyncstamp=' || resyncstamp, dbtype);
   end if;
   deb('resync', 'End mining previous resyncstamp', dbtype);
end if;

--
-- Resync Incarnation records
--
select record_size, last_recid into rec_size, high_ic_recid
  from v$controlfile_record_section
 where type='DATABASE INCARNATION';
recid := dbms_rcvcat.beginIncarnationResync(return_Recid=>TRUE);
deb('resync', 'Incarnation last recid ' ||recid||'; high '||
    high_ic_recid, dbtype);
if (high_ic_recid > recid) then
   high := recid;
   low := recid+1;
   rec_per_chunk := floor(sort_area_size / rec_size);
   total_recs := high_ic_recid - low + 1;
   deb_sort_area(high_ic_recid);
   while (high < high_ic_recid) loop
      high := least(low + rec_per_chunk -1, high_ic_recid);
      for icrec in ic(low, high) loop
         deb('resync', 'Calling checkIncarnation with reset_scn: '||
             icrec.resetlogs_change#||' reset_time: ' ||
             icrec.resetlogs_time, dbtype);
         parent_dbinc_key :=
            dbms_rcvcat.checkIncarnation(icrec.resetlogs_change#,
               icrec.resetlogs_time, icrec.prior_resetlogs_change#,
               icrec.prior_resetlogs_time, db_name);
      end loop;
      low := high + 1;
   end loop;
   -- verify the current incarnation
   deb('resync', 'Calling checkIncarnation for current reset_scn: '||
       reset_scn||' reset_time: '||reset_time, dbtype);
   parent_dbinc_key := dbms_rcvcat.checkIncarnation(reset_scn,
      reset_time, prior_reset_scn, prior_reset_time, db_name);
end if;
dbms_rcvcat.endIncarnationResync(kccdivts, high_ic_recid);
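--
-- Aside: the mining step above is a binary search for the largest point
-- on a timestamp grid at which a monotone predicate ("records at or
-- before this stamp were already resynced") still holds.  A commented-out,
-- self-contained sketch of the same shape over integers, with the cutoff
-- simulated by a constant the search does not otherwise use:
--
--    declare
--       cutoff  constant pls_integer := 613;  -- "unknown" to the search
--       low     pls_integer := 0;
--       high    pls_integer := 1000;          -- assumed search range
--       middle  pls_integer;
--       best    pls_integer := -1;            -- largest i with pred true
--    begin
--       while low <= high loop
--          middle := floor((low + high) / 2);
--          if middle <= cutoff then   -- monotone predicate, true up to cutoff
--             best := middle;         -- remember candidate,
--             low  := middle + 1;     -- and search above it
--          else
--             high := middle - 1;     -- cutoff is below middle
--          end if;
--       end loop;
--       dbms_output.put_line('found cutoff = ' || best);  -- prints 613
--    end;
--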
-- Resync RMAN configuration rows.  This is outside full resync because,
-- from 10i onwards, we can change the configuration for a backup/standby
-- control file.
ret := dbms_rcvcat.beginConfigResync2(high_conf_recid => high_rm_recid);
deb('resync', 'Configuration last recid '||high_rm_recid||
    '; ret '|| ret, dbtype);

-- Moving data from catalog to control file must be done after completing
-- processing of any deleted object records. Also endConfigResync2 is done
-- just before endCkpt to ensure high water marks are updated correctly
-- at the end of configuration resync.
if (ret = CONFIGRESYNC_TORC OR ret = CONFIGRESYNC_TORC_TOCF) then
   resyncConf2Catalog(cf_type, FALSE);
end if;

if auto_prim_resync then
   goto skip_circular_section_resync;
end if;

if (cfname is null and          -- not a resync from controlfile copy
    cf_type != 'BACKUP') then   -- not a backup controlfile
   flashback_time := getFlashbackTime(cf_type);
   select last_recid into high_grsp_recid
     from v$controlfile_record_section
    where type = 'GUARANTEED RESTORE POINT';
   dbms_rcvcat.updateOldestFlashbackSCN(
      oldest_flashback_scn  => NULL,   -- obsolete argument
      oldest_flashback_time => flashback_time);
   deb('resync', 'high_grsp_recid = ' || high_grsp_recid, dbtype);
   if dbms_rcvcat.beginGuaranteedRPResync(high_grsp_recid) then
      for grsprec in grsp loop
         deb('resync', 'Calling checkGuaranteedRP ' || grsprec.rspname,
             dbtype);
         dbms_rcvcat.checkGuaranteedRP (grsprec.rspname, grsprec.from_scn,
            grsprec.to_scn, grsprec.resetlogs_change#,
            grsprec.resetlogs_time, grsprec.rsptime, grsprec.rsprsptime,
            grsprec.guaranteed);
      end loop;
      dbms_rcvcat.endGuaranteedRPResync;
   end if;
end if;

-- If you add any more circular record types, and if the cursor
-- to get the records does a join, be sure to do an outer join so that
-- we see all the circular records, regardless of the joined table
-- state.

-- Resync V$RMAN_STATUS records
-- The resync of the V$RMAN_STATUS fixed view should be done first, because
-- backup piece, copy and proxycopy will have recid and stamps pointing
-- to some V$RMAN_STATUS row. Also, note that high_rsr_recid (one in RCVCAT)
-- is always set to the recid of the oldest still running session. This is
-- because if the "running" session ungracefully dies, then the next resync
-- should fix the row status.
--
recid := dbms_rcvcat.beginRmanStatusResync;
--
-- The record size and the id of the last record are obtained by querying
-- v$controlfile_record_section.
--
select record_size, last_recid into rec_size, high_rsr_recid
  from v$controlfile_record_section where type='RMAN STATUS';
running_found := FALSE;
deb('resync', 'RmanStatusResync last recid '||recid||
    '; high '||high_rsr_recid, dbtype);
shigh_rsr_recid := high_rsr_recid;
if (high_rsr_recid > recid) then
   high := recid;
   low := recid + 1;
   rec_per_chunk := floor(sort_area_size / rec_size);
   total_recs := high_rsr_recid - low + 1;
   deb_sort_area(high_rsr_recid);
   while (high < high_rsr_recid) loop
      high := least(low + rec_per_chunk - 1, high_rsr_recid);
      deb('resync', 'Calling rsr cursor from: '|| low||' to: '||high,
          dbtype);
      for rsrrec in rsr(low, high) loop
         --
         -- If we run into a row which is still active (the status is
         -- 'running'), we set shigh_rsr_recid just below the lowest recid
         -- of all running sessions, because the rows with status running
         -- need to be resynced once more.
         -- bug 11872103 - don't consider jobs running more than 4 days.
if (rsrrec.status like '%RUNNING%' and sysdate - rsrrec.start_time < 4 and not running_found ) then deb('resync', 'row '||low||' belongs to job still running: ' || rsrrec.status, dbtype); shigh_rsr_recid := rsrrec.recid - 1; running_found := TRUE; else deb('resync', 'row '||low||' belongs to finished job: ' || rsrrec.status, dbtype, rman_constant.LEVEL_HI); end if; deb('resync', 'Calling checkRmanStatus for '|| rsrrec.recid || ' with status '||rsrrec.status, dbtype); dbms_rcvcat.checkRmanStatus(rsrrec.recid, rsrrec.stamp, rsrrec.parent_recid, rsrrec.parent_stamp, rsrrec.row_level, rsrrec.row_type, rsrrec.command_id, rsrrec.operation, rsrrec.status, rsrrec.mbytes_processed, rsrrec.start_time, rsrrec.end_time, rsrrec.input_bytes, rsrrec.output_bytes, rsrrec.optimized, rsrrec.object_type, rsrrec.session_recid, rsrrec.session_stamp, rsrrec.output_device_type, rsrrec.osb_allocated); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endRmanStatusResync(shigh_rsr_recid); if for_dbuname is not null then -- we can not resync rman output from remote instance as it is not in -- control file. So, we will display a debug msg saying rman output -- from remote site will not be resynced. This will be a limitation for -- remote resync. deb('resync', 'we can not resync rman output for remote instance'); else low := dbms_rcvcat.beginRmanOutputResync(inst_startup_stamp); deb('resync', 'RMAN output last RMAN output '||low); for routrec in rout(low) loop deb('resync', 'Calling checkRmanOutput with recid '||routrec.recid || ' stamp '|| routrec.stamp||' session '||routrec.session_recid || ' session stamp '|| routrec.session_stamp, dbtype, rman_constant.LEVEL_HI); deb('resync', 'rman_status_recid '||routrec.rman_status_recid || ' rman_status_stamp '|| routrec.rman_status_stamp, dbtype, rman_constant.LEVEL_HI); deb('resync', 'Calling checkRmanOutput with output '||routrec.output, dbtype, rman_constant.LEVEL_HI); dbms_rcvcat.checkRmanOutput(routrec.recid, routrec.stamp, routrec.session_recid, routrec.session_stamp, routrec.rman_status_recid, routrec.rman_status_stamp, routrec.output); end loop; dbms_rcvcat.endRmanOutputResync; end if; recid := dbms_rcvcat.beginLogHistoryResync; if (recid = 0) then lh_lowscn := dbms_rcvcat.getLogHistoryLowSCN; if (lh_lowscn > 0) then select nvl(max(recid), 0) into recid from v$log_history where first_change# <= lh_lowscn; end if; end if; deb('resync', 'Log History last recid '||recid|| '; high '||high_lh_recid, dbtype); if (high_lh_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='LOG HISTORY'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_lh_recid - low + 1; deb_sort_area(high_lh_recid); while (high < high_lh_recid) loop high := least(low + rec_per_chunk -1, high_lh_recid); for rlhrec in rlh(low, high) loop deb('resync', 'Calling checkLogHistory with recid '||rlhrec.recid || ' thread '|| rlhrec.thread#||' sequence '||rlhrec.sequence# || ' reset_scn '|| rlhrec.resetlogs_change#, dbtype); dbms_rcvcat.checkLogHistory( rlhrec.recid, rlhrec.stamp, rlhrec.thread#, rlhrec.sequence#, rlhrec.low_scn, rlhrec.low_time, rlhrec.next_scn, rlhrec.resetlogs_change#, rlhrec.resetlogs_time); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endLogHistoryResync; recid := dbms_rcvcat.beginArchivedLogResync; deb('resync', 'Archive log last recid '||recid|| '; high '||high_al_recid, dbtype); if (high_al_recid > recid) then select record_size into rec_size from 
v$controlfile_record_section where type='ARCHIVED LOG';
   high := recid;
   low := recid + 1;
   rec_per_chunk := floor(sort_area_size / rec_size);
   total_recs := high_al_recid - low + 1;
   deb_sort_area(high_al_recid);
   while (high < high_al_recid) loop
      high := least(low + rec_per_chunk -1, high_al_recid);
      for alrec in al(low, high, cf_type) loop
      <<al_resync_again>>
         begin
            deb('resync', 'Calling checkArchivedLog for '|| alrec.name,
                dbtype);
            deb('resync', ' with sequence '||alrec.sequence#||
                ' archived '||alrec.archived||
                ' status '||alrec.status||
                ' recid '||alrec.recid||
                ' is_standby '||alrec.is_standby, dbtype);
            dbms_rcvcat.checkArchivedLog(
               alrec.recid, alrec.stamp, alrec.thread#, alrec.sequence#,
               alrec.resetlogs_change#, alrec.resetlogs_time,
               alrec.first_change#, alrec.first_time, alrec.next_change#,
               alrec.next_time, alrec.blocks, alrec.block_size, alrec.name,
               alrec.archived, alrec.completion_time, alrec.status,
               alrec.is_standby, null, null, alrec.is_recovery_dest_file,
               alrec.compressed, alrec.creator, alrec.terminal);
         exception
            when change_record_stamp then
               deb('resync',
                   'got exception: Changing stamp for this record');
               if for_dbuname is not null then
                  null_retVal := sys.dbms_backup_restore.remoteSQLExecute(
                     source_dbuname => for_dbuname,
                     source_cs      => source_cs,
                     stmt => 'begin ' ||
                        ' sys.dbms_backup_restore.IncrementRecordStamp(' ||
                        ' rectype => ' ||
                        ' sys.dbms_backup_restore.RTYP_ARCHIVED_LOG ' ||
                        ' , recid => ' || alrec.recid ||
                        ' , stamp => ' || alrec.stamp ||
                        ' ); ' ||
                        'end;');
               else
                  sys.dbms_backup_restore.IncrementRecordStamp(
                     rectype => sys.dbms_backup_restore.RTYP_ARCHIVED_LOG,
                     recid   => alrec.recid,
                     stamp   => alrec.stamp);
               end if;
               alrec.stamp := alrec.stamp + 1;
               krmicd.clearErrors;
               goto al_resync_again;
         end;
      end loop;
      low := high + 1;
   end loop;
end if;
dbms_rcvcat.endArchivedLogResync;

recid := dbms_rcvcat.beginOfflineRangeResync;
deb('resync', 'Offline range last recid '||recid||
    '; high '||high_or_recid, dbtype);
if (high_or_recid > recid) then
   select record_size into rec_size
     from v$controlfile_record_section where type='OFFLINE RANGE';
   high := recid;
   low := recid + 1;
   rec_per_chunk := floor(sort_area_size / rec_size);
   total_recs := high_or_recid - low + 1;
   deb_sort_area(high_or_recid);
   while (high < high_or_recid) loop
      high := least(low + rec_per_chunk -1, high_or_recid);
      for offrrec in offr(low, high) LOOP
         deb('resync', 'Calling checkOfflineRange'||
             ' recid: '||nvl(offrrec.recid,-1)||
             ' file#: '||offrrec.file#||
             ' creation_scn: '||nvl(offrrec.creation_change#, -1)||
             ' offline_scn: '||offrrec.offline_change#||
             ' online_scn: '||offrrec.online_change#, dbtype);
         dbms_rcvcat.checkOfflineRange(
            offrrec.recid, offrrec.stamp, offrrec.file#,
            offrrec.creation_change#, offrrec.offline_change#,
            offrrec.online_change#, offrrec.online_time, cf_create_time,
            offrrec.resetlogs_change#, offrrec.resetlogs_time);
      end loop;
      low := high + 1;
   end loop;
end if;
dbms_rcvcat.endOfflineRangeResync;

--
-- Resync Restore Points
--
-- Update the high_nrsp_recid
select last_recid, record_size into high_nrsp_recid, rec_size
  from v$controlfile_record_section where type = 'RESTORE POINT';
deb('resync', 'high_nrsp_recid= '||high_nrsp_recid, dbtype);

-- Must resync normal restore points before backup sets.
-- BackupSet resync may query and update normal restorepoint entries.
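--
-- Aside: the <<al_resync_again>> block above shows the collision-retry
-- idiom used throughout this section: when the catalog signals that a
-- (recid, stamp) pair collides (change_record_stamp), bump the stamp at
-- the source and retry the same record via a backward goto.  A
-- commented-out, self-contained sketch of the control flow (the collision
-- is simulated; in the real code the bump is IncrementRecordStamp):
--
--    declare
--       collision  exception;
--       my_stamp   pls_integer := 100;
--    begin
--       <<try_again>>
--       begin
--          -- pretend stamps below 102 collide with an existing record
--          if my_stamp < 102 then
--             raise collision;
--          end if;
--          dbms_output.put_line('registered with stamp ' || my_stamp);
--       exception
--          when collision then
--             my_stamp := my_stamp + 1;  -- bump the stamp and keep the
--             goto try_again;            -- local copy in step, then retry
--       end;
--    end;
--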
recid := dbms_rcvcat.beginRestorePointResync; deb('resync', 'Restore Point last recid '||recid|| '; high '|| high_nrsp_recid,dbtype); if (high_nrsp_recid > recid) then high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_nrsp_recid - low + 1; deb_sort_area(high_nrsp_recid); while (high < high_nrsp_recid) loop high := least(low + rec_per_chunk -1, high_nrsp_recid); for nrsprec in nrsp(low, high) loop deb('resync', 'Calling checkRestorePoint for recid '|| nrsprec.recid||' name '||nrsprec.nrsname, dbtype); dbms_rcvcat.checkRestorePoint( nrsprec.recid, nrsprec.stamp, nrsprec.nrsname, nrsprec.reset_scn, nrsprec.reset_time, nrsprec.nrsscn, nrsprec.nrsrsptime, nrsprec.nrstime, nrsprec.deleted); end loop; low := high + 1; end loop; end if; select min(nrsrid) into low from x$kccnrs; -- Purge catalog below cf recid dbms_rcvcat.endRestorePointResync(low); recid := dbms_rcvcat.beginBackupSetResync; low_bs_recid := recid; -- store this recid for bdf, brl resync deb('resync', 'Backup set last recid '||recid|| '; high '||high_bs_recid, dbtype); if (high_bs_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP SET'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bs_recid - low + 1; deb_sort_area(high_bs_recid); while (high < high_bs_recid) loop high := least(low + rec_per_chunk -1, high_bs_recid); for bsrec in bs(low, high) loop deb('resync', 'Calling checkBackupSet for set_stamp '|| bsrec.set_stamp||' set_count '||bsrec.set_count|| ' pieces: '||bsrec.pieces||' recid: '||bsrec.recid, dbtype); dbms_rcvcat.checkBackupSet( bsrec.recid, bsrec.stamp, bsrec.set_stamp, bsrec.set_count, bsrec.backup_type, bsrec.incremental_level, bsrec.pieces, bsrec.start_time, bsrec.completion_time, bsrec.controlfile_included, bsrec.input_file_scan_only, bsrec.keep_options, bsrec.keep_until, bsrec.block_size, bsrec.multi_section); end loop; low := high + 1; end loop; deb('resync', 'Backupset -> Backup DataFile', dbtype); low_bdf_recid := dbms_rcvcat.beginBackupDataFileResync; select nvl(min(bdf.recid) - 1, 0), nvl(max(bdf.recid), 0) into local_low, local_high from v$backup_set bs, v$backup_datafile bdf where ((bdf.recid <= low_bdf_recid and low_bdf_recid != 0) OR (bdf.stamp <= kccdivts and low_bdf_recid = 0)) and bs.recid between low_bs_recid and high_bs_recid and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid) and bs.stamp >= resyncstamp and bs.set_stamp = bdf.set_stamp and bs.set_count = bdf.set_count and bs.backup_type != 'L'; -- ignore archivelog backups select record_size into rec_size from v$controlfile_record_section where type='BACKUP DATAFILE'; high := local_low; low := local_low + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := local_high - low + 1; deb_sort_area(local_high); while (high < local_high) loop high := least(low + rec_per_chunk -1, local_high); for bdfbsrec in bdfbs(low, high, low_bs_recid, high_bs_recid) loop deb('resync', 'Calling checkBackupDataFile for set_stamp '|| bdfbsrec.set_stamp||' set_count '||bdfbsrec.set_count, dbtype); deb('resync', ' file# '||bdfbsrec.file#||' recid '||bdfbsrec.recid, dbtype); dbms_rcvcat.checkBackupDataFile( bdfbsrec.recid, bdfbsrec.stamp, bdfbsrec.set_stamp, bdfbsrec.set_count, bdfbsrec.file#, bdfbsrec.creation_change#, bdfbsrec.creation_time, bdfbsrec.resetlogs_change#, bdfbsrec.resetlogs_time, bdfbsrec.incremental_level, bdfbsrec.incremental_change#, bdfbsrec.checkpoint_change#, 
bdfbsrec.checkpoint_time, bdfbsrec.absolute_fuzzy_change#, bdfbsrec.datafile_blocks, bdfbsrec.blocks, bdfbsrec.block_size, bdfbsrec.oldest_offline_range, bdfbsrec.completion_time, bdfbsrec.controlfile_type, bdfbsrec.marked_corrupt, bdfbsrec.media_corrupt, bdfbsrec.logically_corrupt, FALSE, bdfbsrec.blocks_read, bdfbsrec.used_change_tracking, bdfbsrec.used_optimization, bdfbsrec.foreign_dbid, bdfbsrec.plugged_readonly, bdfbsrec.plugin_change#, bdfbsrec.plugin_resetlogs_change#, bdfbsrec.plugin_resetlogs_time, bdfbsrec.section_size); end loop; low := high + 1; end loop; dbms_rcvcat.endBackupDataFileResync; deb('resync', 'Backupset -> Backup Spfile', dbtype); low_bsf_recid := dbms_rcvcat.beginBackupSpFileResync; select nvl(min(bsf.recid) - 1, 0), nvl(max(bsf.recid), 0) into local_low, local_high from v$backup_set bs, v$backup_spfile bsf where ((bsf.recid <= low_bsf_recid and low_bsf_recid != 0) OR (bsf.stamp <= kccdivts and low_bsf_recid = 0)) and bs.recid between low_bs_recid and high_bs_recid and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid) and bs.stamp >= resyncstamp and bs.set_stamp = bsf.set_stamp and bs.set_count = bsf.set_count and bs.backup_type != 'L'; -- ignore archivelog backups select record_size into rec_size from v$controlfile_record_section where type='BACKUP SPFILE'; high := local_low; low := local_low + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := local_high - low + 1; deb_sort_area(local_high); while (high < local_high) loop high := least(low + rec_per_chunk -1, local_high); for bsfbsrec in bsfbs(low, high, low_bs_recid, high_bs_recid) loop deb('resync', 'Calling checkBackupSpFile for set_stamp '|| bsfbsrec.set_stamp||' set_count '||bsfbsrec.set_count ||' recid '||bsfbsrec.recid, dbtype); dbms_rcvcat.checkBackupSpFile( bsfbsrec.recid, bsfbsrec.stamp, bsfbsrec.set_stamp, bsfbsrec.set_count, bsfbsrec.modification_time, bsfbsrec.bytes, FALSE, bsfbsrec.db_unique_name); end loop; low := high + 1; end loop; dbms_rcvcat.endBackupSpFileResync; deb('resync', 'Backupset -> Backup RedoLog', dbtype); low_brl_recid := dbms_rcvcat.beginBackupRedoLogResync; select nvl(min(brl.recid) - 1, 0), nvl(max(brl.recid), 0) into local_low, local_high from v$backup_set bs, v$backup_redolog brl where ((brl.recid <= low_brl_recid and low_brl_recid != 0) OR (brl.stamp <= kccdivts and low_brl_recid = 0)) and bs.recid between low_bs_recid and high_bs_recid and (bs.stamp >= kccdivts OR bs.recid = high_bs_recid) and bs.stamp >= resyncstamp and bs.set_stamp = brl.set_stamp and bs.set_count = brl.set_count and bs.backup_type = 'L'; -- only archivelog backups select record_size into rec_size from v$controlfile_record_section where type='BACKUP REDOLOG'; high := local_low; low := local_low + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := local_high - low + 1; deb_sort_area(local_high); while (high < local_high) loop high := least(low + rec_per_chunk -1, local_high); for brlbsrec in brlbs(low, high, low_bs_recid, high_bs_recid) loop deb('resync', 'Calling checkBackupRedoLog for set_stamp '|| brlbsrec.set_stamp||' set_count '||brlbsrec.set_count|| ' recid '||brlbsrec.recid, dbtype); deb('resync', ' sequence '||brlbsrec.sequence#|| ' first change '||brlbsrec.first_change#|| ' next change '||brlbsrec.next_change#, dbtype); dbms_rcvcat.checkBackupRedoLog( brlbsrec.recid, brlbsrec.stamp, brlbsrec.set_stamp, brlbsrec.set_count, brlbsrec.thread#, brlbsrec.sequence#, brlbsrec.resetlogs_change#, brlbsrec.resetlogs_time, brlbsrec.first_change#, brlbsrec.first_time, 
brlbsrec.next_change#, brlbsrec.next_time, brlbsrec.blocks, brlbsrec.block_size, FALSE, brlbsrec.terminal); end loop; low := high + 1; end loop; dbms_rcvcat.endBackupRedoLogResync; end if; dbms_rcvcat.endBackupSetResync; recid := dbms_rcvcat.beginBackupPieceResync; deb('resync', 'Backup piece last recid '||recid|| '; high '||high_bp_recid, dbtype); if (high_bp_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP PIECE'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bp_recid - low + 1; deb_sort_area(high_bp_recid); while (high < high_bp_recid) loop high := least(low + rec_per_chunk -1, high_bp_recid); for bprec in bp(low, high) loop << bp_resync_again>> begin deb('resync', 'Calling checkBackupPiece for stamp '|| bprec.stamp||' recid '||bprec.recid, dbtype); deb('resync', ' Handle '||bprec.handle||' status '|| bprec.status||' piece# '||bprec.piece#, dbtype); deb('resync', 'set_stamp '||bprec.set_stamp|| ' set_count '||bprec.set_count, dbtype); dbms_rcvcat.checkBackupPiece( bprec.recid, bprec.stamp, bprec.set_stamp, bprec.set_count, bprec.piece#, bprec.tag, bprec.device_type, bprec.handle, bprec.comments, bprec.media, bprec.concur, bprec.start_time, bprec.completion_time, bprec.status, bprec.copy#, bprec.media_pool, bprec.bytes, bprec.is_recovery_dest_file, bprec.rman_status_recid, bprec.rman_status_stamp, bprec.compressed, bprec.encrypted, bprec.backed_by_osb); exception when change_record_stamp then deb('resync', 'got exception: Changing stamp for this record'); if for_dbuname is not null then null_retVal := sys.dbms_backup_restore.remoteSQLExecute( source_dbuname => for_dbuname, source_cs => source_cs, stmt => 'begin ' || ' sys.dbms_backup_restore.IncrementRecordStamp(' || ' rectype => ' || ' sys.dbms_backup_restore.RTYP_BACKUP_PIECE ' || ' , recid => ' || bprec.recid || ' , stamp => ' || bprec.stamp || ' ); ' || 'end;'); else sys.dbms_backup_restore.IncrementRecordStamp( rectype => sys.dbms_backup_restore.RTYP_BACKUP_PIECE, recid => bprec.recid, stamp => bprec.stamp); end if; bprec.stamp := bprec.stamp + 1; krmicd.clearErrors; goto bp_resync_again; end; end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBackupPieceResync; recid := dbms_rcvcat.beginBackupDataFileResync; deb('resync', 'Backup DataFile last recid '||recid|| '; high '||high_bf_recid, dbtype); if (high_bf_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP DATAFILE'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bf_recid - low + 1; deb_sort_area(high_bf_recid); while (high < high_bf_recid) loop high := least(low + rec_per_chunk -1, high_bf_recid); for bdfrec in bdf(low, high) loop deb('resync', 'Calling checkBackupDataFile for set_stamp '|| bdfrec.set_stamp||' set_count '||bdfrec.set_count || ' recid '||bdfrec.recid, dbtype); deb('resync', ' file# '||bdfrec.file#, dbtype); dbms_rcvcat.checkBackupDataFile( bdfrec.recid, bdfrec.stamp, bdfrec.set_stamp, bdfrec.set_count, bdfrec.file#, bdfrec.creation_change#, bdfrec.creation_time, bdfrec.resetlogs_change#, bdfrec.resetlogs_time, bdfrec.incremental_level, bdfrec.incremental_change#, bdfrec.checkpoint_change#, bdfrec.checkpoint_time, bdfrec.absolute_fuzzy_change#, bdfrec.datafile_blocks, bdfrec.blocks, bdfrec.block_size, bdfrec.oldest_offline_range, bdfrec.completion_time, bdfrec.controlfile_type, bdfrec.marked_corrupt, bdfrec.media_corrupt, 
bdfrec.logically_corrupt, TRUE, bdfrec.blocks_read, bdfrec.used_change_tracking, bdfrec.used_optimization, bdfrec.foreign_dbid, bdfrec.plugged_readonly, bdfrec.plugin_change#, bdfrec.plugin_resetlogs_change#, bdfrec.plugin_resetlogs_time, bdfrec.section_size); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBackupDataFileResync; recid := dbms_rcvcat.beginBackupSpFileResync; deb('resync', 'Backup SPFILE last recid '||recid|| '; high '||high_bi_recid, dbtype); if (high_bi_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP SPFILE'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bi_recid - low + 1; deb_sort_area(high_bi_recid); while (high < high_bi_recid) loop high := least(low + rec_per_chunk - 1, high_bi_recid); for bsfrec in bsf(low, high) loop deb('resync', 'Calling checkBackupSpFile for set_stamp '|| bsfrec.set_stamp||' set_count '||bsfrec.set_count || ' recid '||bsfrec.recid, dbtype); dbms_rcvcat.checkBackupSpFile( bsfrec.recid, bsfrec.stamp, bsfrec.set_stamp, bsfrec.set_count, bsfrec.modification_time, bsfrec.bytes, TRUE, bsfrec.db_unique_name); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBackupSpFileResync; recid := dbms_rcvcat.beginBackupCorruptionResync; deb('resync', 'Backup Corruption last recid '||recid|| '; high '||high_fc_recid, dbtype); if (high_fc_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP CORRUPTION'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_fc_recid - low + 1; deb_sort_area(high_fc_recid); while (high < high_fc_recid) loop high := least(low + rec_per_chunk -1, high_fc_recid); for bcbrec in bcb(low, high) loop deb('resync', 'Calling checkBackupCorruption for set_stamp '|| bcbrec.set_stamp||' set_count '||bcbrec.set_count, dbtype); deb('resync', ' file# '||bcbrec.file# ||' recid '||bcbrec.recid, dbtype); dbms_rcvcat.checkBackupCorruption( bcbrec.recid, bcbrec.stamp, bcbrec.set_stamp, bcbrec.set_count, bcbrec.piece#, bcbrec.file#, bcbrec.block#, bcbrec.blocks, bcbrec.corruption_change#, bcbrec.marked_corrupt, bcbrec.corruption_type); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBackupCorruptionResync; recid := dbms_rcvcat.beginBackupRedoLogResync; deb('resync', 'Backup RedoLog last recid '||recid|| '; high '||high_bl_recid, dbtype); if (high_bl_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='BACKUP REDOLOG'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bl_recid - low + 1; deb_sort_area(high_bl_recid); while (high < high_bl_recid) loop high := least(low + rec_per_chunk -1, high_bl_recid); for brlrec in brl(low, high) loop deb('resync', 'Calling checkBackupRedoLog for set_stamp '|| brlrec.set_stamp||' set_count '||brlrec.set_count|| ' recid '||brlrec.recid, dbtype); deb('resync', ' sequence '||brlrec.sequence#|| ' first change '||brlrec.first_change#|| ' next change '||brlrec.next_change#, dbtype); dbms_rcvcat.checkBackupRedoLog( brlrec.recid, brlrec.stamp, brlrec.set_stamp, brlrec.set_count, brlrec.thread#, brlrec.sequence#, brlrec.resetlogs_change#, brlrec.resetlogs_time, brlrec.first_change#, brlrec.first_time, brlrec.next_change#, brlrec.next_time, brlrec.blocks, brlrec.block_size, TRUE, brlrec.terminal); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBackupRedoLogResync; recid := 
dbms_rcvcat.beginDataFileCopyResync; deb('resync', 'DataFIleCopy last recid '||recid|| '; high '||high_dc_recid, dbtype); if (high_dc_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='DATAFILE COPY'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_dc_recid - low + 1; deb_sort_area(high_dc_recid); while (high < high_dc_recid) loop high := least(low + rec_per_chunk -1, high_dc_recid); for cdfrec in cdf(low, high) loop << cdf_resync_again >> begin deb('resync', 'Calling checkDataFileCopy for '|| cdfrec.fname, dbtype); deb('resync', ' file# '||cdfrec.file#|| ' status '||cdfrec.status|| ' recid '||cdfrec.recid, dbtype); dbms_rcvcat.checkDataFileCopy( cdfrec.recid, cdfrec.stamp, cdfrec.fname, cdfrec.tag, cdfrec.file#, cdfrec.create_scn, cdfrec.create_time, cdfrec.reset_scn, cdfrec.reset_time, cdfrec.incr_level, cdfrec.ckp_scn, cdfrec.ckp_time, cdfrec.online_fuzzy, cdfrec.backup_fuzzy, cdfrec.abs_fuzzy_scn, cdfrec.rcv_fuzzy_scn, cdfrec.rcv_fuzzy_time, cdfrec.blocks, cdfrec.block_size, cdfrec.oldest_offline_range, cdfrec.completion_time, cdfrec.status, cdfrec.controlfile_type, cdfrec.keep_options, cdfrec.keep_until, cdfrec.scanned, cdfrec.is_recovery_dest_file, cdfrec.rman_status_recid, cdfrec.rman_status_stamp, cdfrec.marked_corrupt, cdfrec.foreign_dbid, cdfrec.plugged_readonly, cdfrec.plugin_change#, cdfrec.plugin_resetlogs_change#, cdfrec.plugin_resetlogs_time); exception when change_record_stamp then deb('resync', 'got exception: Changing stamp for this record'); if for_dbuname is not null then null_retVal := sys.dbms_backup_restore.remoteSQLExecute( source_dbuname => for_dbuname, source_cs => source_cs, stmt => 'begin ' || ' sys.dbms_backup_restore.IncrementRecordStamp(' || ' rectype => ' || ' sys.dbms_backup_restore.RTYP_DFILE_COPY ' || ' , recid => ' || cdfrec.recid || ' , stamp => ' || cdfrec.stamp || ' ); ' || 'end;'); else sys.dbms_backup_restore.IncrementRecordStamp( rectype => sys.dbms_backup_restore.RTYP_DFILE_COPY, recid => cdfrec.recid, stamp => cdfrec.stamp); end if; cdfrec.stamp := cdfrec.stamp + 1; krmicd.clearErrors; goto cdf_resync_again; end; end loop; low := high + 1; end loop; end if; dbms_rcvcat.endDataFileCopyResync; recid := dbms_rcvcat.beginCopyCorruptionResync; deb('resync', 'Copy Corruption last recid '||recid|| '; high '||high_cc_recid, dbtype); if (high_cc_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='COPY CORRUPTION'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_cc_recid - low + 1; deb_sort_area(high_cc_recid); while (high < high_cc_recid) loop high := least(low + rec_per_chunk -1, high_cc_recid); for ccbrec in ccb(low, high) loop deb('resync', 'Calling checkCopyCorruption for file '||ccbrec.file# ||' recid '||ccbrec.copy_recid, dbtype); dbms_rcvcat.checkCopyCorruption(ccbrec.recid, ccbrec.stamp, ccbrec.copy_recid, ccbrec.copy_stamp, ccbrec.file#, ccbrec.block#, ccbrec.blocks, ccbrec.corruption_change#, ccbrec.marked_corrupt, ccbrec.corruption_type); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endCopyCorruptionResync; select record_size, decode(records_used, 0, 0, last_recid + 1 - records_used) old_recid, last_recid into rec_size, low_bcr_recid, high_bcr_recid from v$controlfile_record_section where type='DATABASE BLOCK CORRUPTION'; recid := dbms_rcvcat.beginBlockCorruptionResync(low_bcr_recid); deb('resync', 'Block Corruption last recid 
'||recid|| '; low ' ||low_bcr_recid || ';high '||high_bcr_recid, dbtype); if (high_bcr_recid > recid) then high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_bcr_recid - low + 1; deb_sort_area(high_bcr_recid); while (high < high_bcr_recid) loop high := least(low + rec_per_chunk -1, high_bcr_recid); for bcrrec in bcr(low, high) loop deb('resync', 'Calling checkBlockCorruption for' ||' recid ' || bcrrec.recid ||' stamp ' || bcrrec.stamp ||' file# ' || bcrrec.file# ||' block# ' || bcrrec.block# ||' blocks ' || bcrrec.blocks, dbtype); dbms_rcvcat.checkBlockCorruption(bcrrec.recid, bcrrec.stamp, bcrrec.file#, bcrrec.create_scn, bcrrec.create_time, bcrrec.block#, bcrrec.blocks, bcrrec.corrupt_scn, bcrrec.corruption_type); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endBlockCorruptionResync; recid := dbms_rcvcat.beginDeletedObjectResync; deb('resync', 'DeletedObject last recid '||recid|| '; high '||high_dl_recid, dbtype); if (high_dl_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='DELETED OBJECT'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_dl_recid - low + 1; deb_sort_area(high_dl_recid); while (high < high_dl_recid) loop high := least(low + rec_per_chunk -1, high_dl_recid); for dlrec in dl(low, high) loop deb('resync', 'Calling checkDeletedObject for recid '|| dlrec.recid ||' object type ' || dlrec.object_type ||' with count ' || dlrec.object_recid ||' and stamp ' || dlrec.object_stamp ||' and data ' || dlrec.object_data ||' and set_stamp ' || nvl(to_char(dlrec.set_stamp), 'NULL') ||' and set_count ' || nvl(to_char(dlrec.set_count), 'NULL') ||' and create_scn ' || nvl(to_char(dlrec.object_create_scn), 'NULL'), dbtype); dbms_rcvcat.checkDeletedObject( dlrec.recid, dlrec.stamp, dlrec.object_type, dlrec.object_recid, dlrec.object_stamp, dlrec.object_data, dlrec.object_fname, dlrec.object_create_scn, dlrec.set_stamp, dlrec.set_count); end loop; low := high + 1; end loop; end if; dbms_rcvcat.endDeletedObjectResync; recid := dbms_rcvcat.beginProxyResync; deb('resync', 'ProxyResync last recid '||recid|| '; high '||high_pc_recid, dbtype); if (high_pc_recid > recid) then select record_size into rec_size from v$controlfile_record_section where type='PROXY COPY'; high := recid; low := recid + 1; rec_per_chunk := floor(sort_area_size / rec_size); total_recs := high_pc_recid - low + 1; deb_sort_area(high_pc_recid); while (high < high_pc_recid) loop high := least(low + rec_per_chunk - 1, high_pc_recid); for xdfrec in xdf(low, high) loop << xdf_resync_again >> begin deb('resync', 'Calling checkProxyDataFile for '|| xdfrec.handle || ' recid ' || xdfrec.recid, dbtype); deb('resync', ' file# '||xdfrec.file#|| ' status '||xdfrec.status, dbtype); dbms_rcvcat.checkProxyDataFile(xdfrec.recid, xdfrec.stamp, xdfrec.tag, xdfrec.file#, xdfrec.create_scn, xdfrec.create_time, xdfrec.reset_scn, xdfrec.reset_time, xdfrec.incr_level, xdfrec.ckp_scn, xdfrec.ckp_time, xdfrec.online_fuzzy, xdfrec.backup_fuzzy, xdfrec.abs_fuzzy_scn, xdfrec.rcv_fuzzy_scn, xdfrec.rcv_fuzzy_time, xdfrec.blocks, xdfrec.block_size, xdfrec.oldest_offline_range, xdfrec.device_type, xdfrec.handle, xdfrec.comments, xdfrec.media, xdfrec.media_pool, xdfrec.start_time, xdfrec.completion_time, xdfrec.status, xdfrec.controlfile_type, xdfrec.keep_options, xdfrec.keep_until, xdfrec.rman_status_recid, xdfrec.rman_status_stamp, xdfrec.foreign_dbid, xdfrec.plugged_readonly, 
                  xdfrec.plugin_change#, xdfrec.plugin_resetlogs_change#,
                  xdfrec.plugin_resetlogs_time);
            exception
               when change_record_stamp then
                  deb('resync',
                      'got exception: Changing stamp for this record');
                  if for_dbuname is not null then
                     null_retVal :=
                        sys.dbms_backup_restore.remoteSQLExecute(
                           source_dbuname => for_dbuname,
                           source_cs      => source_cs,
                           stmt => 'begin ' ||
                              ' sys.dbms_backup_restore.IncrementRecordStamp(' ||
                              ' rectype => ' ||
                              ' sys.dbms_backup_restore.RTYP_PROXY ' ||
                              ' , recid => ' || xdfrec.recid ||
                              ' , stamp => ' || xdfrec.stamp ||
                              ' ); ' ||
                              'end;');
                  else
                     sys.dbms_backup_restore.IncrementRecordStamp(
                        rectype => sys.dbms_backup_restore.RTYP_PROXY,
                        recid   => xdfrec.recid,
                        stamp   => xdfrec.stamp);
                  end if;
                  xdfrec.stamp := xdfrec.stamp + 1;
                  krmicd.clearErrors;
                  goto xdf_resync_again;
            end;
         end loop;
         for xalrec in xal(low, high) loop
         <<xal_resync_again>>
            begin
               deb('resync', 'Calling checkProxyArchivedLog for '||
                   xalrec.handle, dbtype);
               deb('resync', ' thread# ' || xalrec.thread# ||
                   ' sequence# ' || xalrec.sequence# ||
                   ' status ' || xalrec.status ||
                   ' recid ' || xalrec.recid, dbtype);
               dbms_rcvcat.checkProxyArchivedLog(xalrec.recid,
                  xalrec.stamp, xalrec.tag, xalrec.thread#,
                  xalrec.sequence#, xalrec.resetlogs_change#,
                  xalrec.resetlogs_time, xalrec.first_change#,
                  xalrec.first_time, xalrec.next_change#,
                  xalrec.next_time, xalrec.blocks, xalrec.block_size,
                  xalrec.device_type, xalrec.handle, xalrec.comments,
                  xalrec.media, xalrec.media_pool, xalrec.start_time,
                  xalrec.completion_time, xalrec.status,
                  xalrec.rman_status_recid, xalrec.rman_status_stamp,
                  xalrec.terminal, xalrec.keep_until,
                  xalrec.keep_options);
            exception
               when change_record_stamp then
                  deb('resync',
                      'got exception: Changing stamp for this record');
                  if for_dbuname is not null then
                     null_retVal :=
                        sys.dbms_backup_restore.remoteSQLExecute(
                           source_dbuname => for_dbuname,
                           source_cs      => source_cs,
                           stmt => 'begin ' ||
                              ' sys.dbms_backup_restore.IncrementRecordStamp(' ||
                              ' rectype => ' ||
                              ' sys.dbms_backup_restore.RTYP_PROXY ' ||
                              ' , recid => ' || xalrec.recid ||
                              ' , stamp => ' || xalrec.stamp ||
                              ' ); ' ||
                              'end;');
                  else
                     sys.dbms_backup_restore.IncrementRecordStamp(
                        rectype => sys.dbms_backup_restore.RTYP_PROXY,
                        recid   => xalrec.recid,
                        stamp   => xalrec.stamp);
                  end if;
                  xalrec.stamp := xalrec.stamp + 1;
                  krmicd.clearErrors;
                  goto xal_resync_again;
            end;
         end loop;
         low := high + 1;
      end loop;
   end if;
   dbms_rcvcat.endProxyResync;

<<skip_circular_section_resync>>
   -- Release the snapshot enqueue after everything is resynced.  After
   -- this we want to resync from the actual control file, after fixing
   -- configurations...
   if (releasecf) then
      sys.dbms_backup_restore.cfileUseCurrent;
   end if;

   -- Now push the configurations to the local/remote control file.
   if (ret = CONFIGRESYNC_TOCF OR ret = CONFIGRESYNC_TORC_TOCF) then
      resyncConf2ControlFile(for_dbuname, source_cs);
   end if;

   -- If directly connected to the target, update the water marks for
   -- configurations in the catalog schema as well, by resyncing the
   -- configurations from the control file to the recovery catalog.
   -- However, for remote resync this will be done when RMAN connects to
   -- that site as the target database. Note that the RMAN configuration
   -- in the control file and in the catalog schema is the same; just the
   -- recids are out of sync because they were not resynced to the catalog.
if for_dbuname is null and (ret = CONFIGRESYNC_TOCF OR ret = CONFIGRESYNC_TORC_TOCF) then resyncConf2Catalog(cf_type, TRUE); end if; dbms_rcvcat.endConfigResync2; deb('resync', 'Calling sanityCheck', dbtype); dbms_rcvcat.sanityCheck; deb('resync', 'Calling endCkpt', dbtype); dbms_rcvcat.endCkpt; if (auto_prim_resync) then krmicd.writeMsg(1005, 'automatic resync from primary done'); auto_prim_resync := FALSE; for_dbuname := null; cf_type := null; -- initialize retry counters for new resync read_retries := 0; busy_retries := 0; sort_retries := 0; goto restart_resync; elsif (full_resync) then krmicd.writeMsg(8004); else if not implicit then krmicd.writeMsg(1005, 'partial resync complete'); end if; end if; exited('resync', 'OK'); exception when need_primary_resync then if sync_retries < 5 then krmicd.writeErrMsg(1005, sqlerrm); krmicd.writeMsg(1005, 'doing automatic resync from primary'); sync_retries := sync_retries + 1; dbms_rcvcat.cancelCkpt; auto_prim_resync := TRUE; cf_type := null; -- initialize retry counters for new resync read_retries := 0; busy_retries := 0; sort_retries := 0; krmicd.clearErrors; goto restart_resync; else dbms_rcvcat.cancelCkpt; raise; end if; when sort_area_too_small then if (sort_retries < 5) then sort_retries := sort_retries + 1; krmicd.writeMsg(1005, 'resync got ORA'||to_char(sqlcode)|| ', retrying with sort_area_size = '|| to_char((sort_area_size + sort_area_size_incr) / 1048576)|| 'MB'); sort_area_size := set_sort_area_size(sort_area_size + sort_area_size_incr); goto retry; else dbms_rcvcat.cancelCkpt; krmicd.writeMsg(1005, 'resync got ORA-'||to_char(sqlcode)|| ', giving up'); exited('resync', 'sort_area_too_small'); raise; end if; -- RESYNCTODO: What does inconsistant_read mean for remote resync? when does -- this occur at primary/standby under normal circumstances. 
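-- Note on the handler below: inconsistant_read (the misspelling is the actual
-- exception name in dbms_backup_restore) is retried up to 5 times with a
-- linearly growing wait (10s, 20s, ... 50s) before the checkpoint is
-- cancelled and the error is re-raised.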
when sys.dbms_backup_restore.inconsistant_read then -- retry up to 5 times if resync gets the inconsistent_read exception if (read_retries < 5) then read_retries := read_retries + 1; krmicd.writeMsg(1005, 'resync got ORA-235, retry '||to_char(read_retries)); krmicd.clearErrors; krmicd.sleep(10*read_retries); goto retry; else dbms_rcvcat.cancelCkpt; krmicd.writeMsg(1005, 'resync got ORA-235, giving up'); exited('resync', 'inconsistant_read'); raise; end if; when others then dbms_rcvcat.cancelCkpt; if (releasecf) then sys.dbms_backup_restore.cfileUseCurrent; end if; exited('resync', 'ORA-'||to_char(sqlcode)); raise; end; end; >>> # # devalloc: allocate a device # define devalloc <<< -- devalloc declare devtype varchar2(255); chid varchar2(255); debug number := null; options number := null; node varchar2(255); maxsize number; kbytes number := null; parallel binary_integer := null; readrate number := null; rate number := null; sendcmd varchar2(256); vendor varchar2(256); israc boolean; instname varchar2(17); begin &object& if debug is not null then krmicd.execSql( 'alter session set events ''immediate trace name krb_trace level ' ||debug||''''); end if; if options is not null then krmicd.execSql( 'alter session set events ''immediate trace name krb_options level ' ||options||''''); end if; devtype := sys.dbms_backup_restore.deviceAllocate( ident => chid, node => node, &args& ); if kbytes is null then maxsize := sys.dbms_backup_restore.deviceQuery (sys.dbms_backup_restore.DEVICEQUERY_MAXSIZE); else maxsize := kbytes; end if; if maxsize > 0 then sys.dbms_backup_restore.setlimit (sys.dbms_backup_restore.kbytes, maxsize); end if; krmicd.setChannelInfo (devtype, node, maxsize, sys.dbms_backup_restore.deviceQuery (sys.dbms_backup_restore.DEVICEQUERY_PROXY), sys.dbms_backup_restore.deviceQuery (sys.dbms_backup_restore.DEVICEQUERY_MAXPROXY)); if parallel is not null then sys.dbms_backup_restore.setlimit (sys.dbms_backup_restore.parallel, parallel); end if; if readrate is not null then sys.dbms_backup_restore.setlimit (sys.dbms_backup_restore.readrate, readrate); end if; if rate is not null then sys.dbms_backup_restore.setlimit (sys.dbms_backup_restore.max_read_kbytes, rate); end if; if sendcmd is not null then sys.dbms_backup_restore.devicecommand(sendcmd, NULL); end if; krmicd.writeMsg(8030, chid); krmicd.getInstance(instname, israc); if (israc) then krmicd.writeMsg(8605, chid, to_char(krmicd.getSid), to_char(instname), devtype); else krmicd.writeMsg(8500, chid, to_char(krmicd.getSid), devtype); end if; vendor := sys.dbms_backup_restore.deviceQuery (sys.dbms_backup_restore.DEVICEQUERY_VENDOR); if vendor is not null then krmicd.writemsg(8526, chid, vendor); end if; end; >>> # # devrel: release a device # define devrel <<< -- devrel begin sys.dbms_backup_restore.deviceDeallocate; sys.dbms_backup_restore.set_client_info(''); krmicd.writeMsg(8031, krmicd.getChid); krmicd.clearChannelInfo; -- tell krmq no device here now end; >>> # # setlm: set a limit on a device channel # define setlm <<< begin >>> define 'kbytes' <<< sys.dbms_backup_restore.setLimit( sys.dbms_backup_restore.kbytes,&args&); >>> define 'rate' <<< sys.dbms_backup_restore.setLimit( sys.dbms_backup_restore.max_read_kbytes,&args&); >>> define 'readrate' <<< sys.dbms_backup_restore.setLimit( sys.dbms_backup_restore.readrate,&args&); >>> define 'parallel' <<< sys.dbms_backup_restore.setLimit( sys.dbms_backup_restore.parallel,&args&); >>> define setlmend <<< end; >>> # # change: crosscheck or delete objects # # IMPORTANT: ANY 
DBMS_BACKUP_RESTORE ROUTINE YOU CALL IN HERE NEEDS TO BE # LISTED IN THE krmx_shortrpc ARRAY. # define 'change' <<< -- change declare found boolean; mismatch boolean; anymiss boolean := FALSE; runatsite boolean := FALSE; ocount number := 0; mcount number := 0; rscount number := 0; rc number; rc_in_use number; msg number; /* about the object */ new_status varchar2(1); old_status varchar2(1); objectType number; handle varchar2(512); recid number; stamp number; obj_key1 number; obj_key2 number; pieceno number; blksize number; rlscn number; rlstime date; crescn number; ckpscn number; new_fname varchar2(512); cmdmsg number; cmdtxt varchar2(32); new_recid number; new_stamp number; obj_typen varchar2(20); devtype varchar2(20); devicetype varchar2(20); writeflag binary_integer; dummy binary_integer; keep_until_d date := NULL; force binary_integer; hdl_isdisk binary_integer; site_key number; this_site_key number := 0; nsite_key number := 0; source_dbid number; foreignal binary_integer; to_db_unique_name varchar2(30); -- constants include cmd (krminod type), dbnomount, -- keep attributes and the krminod types &constants& -- exceptions dexpired_exists exception; internal_error exception; pragma exception_init(dexpired_exists, -20502); pragma exception_init(internal_error, -600); begin &1& entered('change'); deb('change', 'started, command:'||cmd); select decode(cmd, krmiAVAIL, 6200, krmiDELETE, 6201, krmiDEXPIRED, 6202, krmiKEEP, 6203, krmiUAVAIL, 6204, krmiUNCAT, 6205, krmiXCHECK, 6206) into cmdmsg from x$dual; select decode(cmd, krmiAVAIL, 'CHANGE AVAILABLE', krmiDELETE, 'DELETE', krmiDEXPIRED, 'DELETE', krmiXCHECK, 'CROSSCHECK', 'UNKNOWN') into cmdtxt from x$dual; devicetype := NULL; %IF% catalog this_site_key := dbms_rcvcat.getThisSiteKey; nsite_key := dbms_rcvcat.getThisSiteKey(to_db_unique_name); deb('change', 'this_site_key=' || this_site_key); deb('change', 'nsite_key=' || nsite_key); %ENDIF% catalog loop found := FALSE; mismatch := FALSE; rc_in_use := 0; -- get next object not found on another channel yet. krmicd.changeGetNext(objectType, handle, recid, stamp, obj_key1, obj_key2, pieceno, blksize, ckpscn, rlscn, rlstime, crescn, old_status, devtype, site_key, source_dbid); deb('change', 'File '|| handle || ' belongs to '||site_key); %IF% catalog -- translation from the control file returns site_key as zero; treat -- these records as if they belong to the current site. Note that -- in catalog mode, this_site_key may be a non-zero value from the catalog, -- but translation may still happen in some cases against the controlfile -- for some commands like implicit crosscheck, etc.
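-- Illustrative sketch (not part of the skeleton): the decode() against
-- x$dual above maps the command code to a message number; the same table can
-- be expressed as a plain CASE. The k_* literals are hypothetical stand-ins
-- for the krmi* constants substituted via &constants&; the message numbers
-- 6200, 6201 and 6206 are the ones used above.
declare
  k_avail  constant number := 1;  -- stand-in for krmiAVAIL
  k_delete constant number := 2;  -- stand-in for krmiDELETE
  k_xcheck constant number := 3;  -- stand-in for krmiXCHECK
  l_cmd    number := 3;
  l_cmdmsg number;
begin
  l_cmdmsg := case l_cmd
                when k_avail  then 6200
                when k_delete then 6201
                when k_xcheck then 6206
              end;
  dbms_output.put_line('message number for cmd '||l_cmd||' is '||l_cmdmsg);
end;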
if site_key = 0 and this_site_key <> 0 then site_key := this_site_key; deb('change', 'updated site_key to '|| site_key); end if; %ENDIF% catalog exit when objectType = 0; if old_status != 'X' and cmd = krmiDEXPIRED then krmicd.writeMsg(1005, 'INTERNAL ERROR: Available object during '|| 'DELETE EXPIRED'); raise internal_error; end if; if nforce = 1 or (old_status = 'U' and cmd = krmiDELETE) then force := 1; else force := 0; end if; if devtype = 'DISK' then hdl_isdisk := 1; else hdl_isdisk := 0; end if; if devicetype is NULL and devtype is not NULL then devicetype := devtype; end if; deb('change', ' processing (file/handle='||handle||','|| 'recid='||to_char(recid)||', old_status='||old_status|| ', hdl_isdisk=' || hdl_isdisk || ', devicetype=' || nvl(devicetype, 'NULL') || ', source_dbid=' || source_dbid || ')'); deb('change',' force: '||force); if objectType = krmiBPIECEX then obj_typen := 'Backup Piece'; elsif objectType = krmiPC then obj_typen := 'Proxy Copy'; elsif objectType = krmiDC then obj_typen := 'Datafile Copy'; elsif objectType = krmiRAL then if (source_dbid = 0) then obj_typen := 'Archivelog'; foreignal := 0; else obj_typen := 'Foreign Archivelog'; foreignal := 1; end if; elsif objectType = krmiBS then obj_typen := 'Backup Set'; else krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected objectType='|| to_char(objectType)); raise internal_error; end if; if cmd in (krmiUAVAIL, krmiUNCAT, krmiKEEP, krmiRESETDBUN) then -- Change to unavailable or change of keep attributes can be done on -- any channel, so let's do it on this one. -- Change to available must be done on the appropriate channel, as -- we crosscheck the object first, to verify it really exists. -- Also, we don't require the user to allocate any special channels -- for available/unavailable (we will execute on the default -- channel if necessary or auto-allocate will allocate what we need). found := TRUE; else -- Call the appropriate dbms_backup_restore validate function based on -- object type. begin if objectType = krmiBPIECEX then deb('change', 'Calling ValidateBackupPiece for '||handle); rc := sys.dbms_backup_restore.validateBackupPiece( recid => recid, stamp => stamp, handle => handle, set_stamp => obj_key2, set_count => obj_key1, pieceno => pieceno, hdl_isdisk => hdl_isdisk); elsif objectType = krmiPC then deb('change', 'Calling proxyValOnly for '||handle); rc := sys.dbms_backup_restore.proxyValOnly( recid => recid, stamp => stamp, handle => handle); elsif objectType = krmiDC then deb('change', 'Calling ValidateDataFileCopy for '||handle); rc := sys.dbms_backup_restore.validateDataFileCopy( recid => recid, stamp => stamp, fname => handle, dfnumber => obj_key1, resetlogs_change => rlscn, creation_change => crescn, checkpoint_change => ckpscn, blksize => blksize, signal => 0); elsif objectType = krmiRAL then deb('change', 'Calling ValidateArchivedLog for '||handle); rc := sys.dbms_backup_restore.validateArchivedLog( recid => recid, stamp => stamp, fname => handle, thread => obj_key1, sequence => obj_key2, resetlogs_change => rlscn, first_change => crescn, blksize => blksize, signal => 0, terminal => 0, foreignal => foreignal); end if; exception when others then if implicit != 0 then -- if implicit crosscheck, then just display the errors -- and mark the file as expired.
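-- Illustrative sketch (not part of the skeleton): the validate calls above
-- return a bit mask that is pulled apart below with bitand(). The flag
-- values here are hypothetical; the real ones are the
-- sys.dbms_backup_restore.validate_* constants.
declare
  k_file_different constant number := 1;  -- stand-in for validate_file_different
  k_in_use         constant number := 2;  -- stand-in for validate_in_use
  l_rc     number := 3;                   -- pretend both bits came back set
  l_in_use number;
begin
  l_in_use := bitand(l_rc, k_in_use);
  l_rc     := bitand(l_rc, k_file_different);
  dbms_output.put_line('different='||l_rc||' in_use='||l_in_use);
end;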
rc := sys.dbms_backup_restore.validate_file_different; krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; elsif force = 0 then raise; else krmicd.clearErrors; end if; end; deb('change', 'file '||handle||', rc='||rc); rc_in_use := bitand(rc, sys.dbms_backup_restore.validate_in_use); rc := bitand(rc, sys.dbms_backup_restore.validate_file_different); deb('change', 'file '||handle||', Modified rc='||rc|| ' rc_in_use='||rc_in_use); if rc = 0 and rc_in_use = 0 then found := TRUE; end if; end if; deb('change', 'file '||handle||', found='||bool2char(found)); if krmicd.changeSetFound(found) then if found then deb('change', 'file '||handle||' in found'); if cmd in (krmiDELETE, krmiDEXPIRED) and old_status = 'X' then deb('change', 'file '||handle||'mismatched due to X'); mismatch := TRUE; -- we should not find an object that was marked expired -- raise dexpired_exists; end if; else if rc_in_use != 0 then deb('change', 'cannot validate file:'||handle|| ', in use by another process'); if force = 0 then krmicd.writeMsg(8167, cmdtxt); krmicd.writeMsg(8517, handle, to_char(recid),to_char(stamp)); goto next_object; end if; end if; if cmd = krmiAVAIL then deb('change', 'file '||handle||' not found in AVAIL'); if objectType = krmiBPIECEX then krmicd.writeMsg(6481, handle); elsif objectType = krmiPC then krmicd.writeMsg(6482, handle); elsif objectType = krmiDC then if obj_key1 = 0 then krmicd.writeMsg(6479, handle); else krmicd.writeMsg(6478, handle); end if; elsif objectType = krmiRAL then krmicd.writeMsg(6480, handle); end if; -- Decrement counter, we are not actually changing this object -- to available, but to expired. ocount := ocount - 1; elsif cmd = krmiDELETE then deb('change', 'file '||handle||' not found in DELETE'); if old_status = 'A' then mismatch := TRUE; end if; else deb('change', 'file '||handle||' not found for cmd '||cmd); end if; end if; deb('change', 'file '||handle||' mismatch: '||bool2char(mismatch)|| ' force: '||force|| ' rc_in_use: '||rc_in_use ); if mismatch and force = 0 then -- record information about mismatches for later retrieval krmicd.mismatchObj(obj_typen, handle, site_key); anymiss := TRUE; mcount := mcount + 1; goto next_object; end if; -- determine new_status if cmd in (krmiXCHECK, krmiAVAIL) then if found then new_status := 'A'; else new_status := 'X'; end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then new_status := 'D'; elsif cmd = krmiUNCAT then new_status := 'R'; elsif cmd = krmiUAVAIL then new_status := 'U'; elsif cmd = krmiRESETDBUN then new_status := old_status; elsif cmd = krmiKEEP then -- we don't need status if change keep new_status := 'K'; keep_until_d := stamp2date(keep_until); else krmicd.writeMsg(1005,'INTERNAL ERROR: unknown cmd='||to_char(cmd)); raise internal_error; end if; -- If we are crosschecking or trying to make a file that is owned by -- other site and file could not be found, then ask user to crosscheck -- such files at owner site. if cmd in (krmiXCHECK, krmiAVAIL) then if not found and site_key <> this_site_key then deb('change','file ' || handle || ' must be crosschecked at site ' || site_key); krmicd.runAtSiteObj(obj_typen, handle, site_key); runatsite := TRUE; rscount := rscount + 1; goto next_object; end if; end if; -- If we are deleting a file that is owned by other site and -- could not be found, then ask user to delete at that site, -- These files will be listed at the end of the command execution. 
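-- Illustrative sketch (not part of the skeleton): the new_status decision
-- below, reduced to a local function. The status letters ('A' available,
-- 'X' expired, 'D' deleted) are the ones the skeleton assigns; the k_*
-- literals are hypothetical stand-ins for the krmi* constants.
declare
  k_xcheck constant number := 3;  -- stand-in for krmiXCHECK
  k_delete constant number := 2;  -- stand-in for krmiDELETE
  function status_for(p_cmd number, p_found boolean) return varchar2 is
  begin
    if p_cmd = k_xcheck then
      if p_found then return 'A'; else return 'X'; end if;
    elsif p_cmd = k_delete then
      return 'D';
    end if;
    return '?';
  end;
begin
  -- a crosscheck that fails to find the file marks it expired
  dbms_output.put_line('crosscheck, not found -> '||status_for(k_xcheck, FALSE));
end;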
if cmd in (krmiDEXPIRED, krmiDELETE) then if not found and site_key <> this_site_key then deb('change','file ' || handle || ' must be deleted at site ' || site_key); krmicd.runAtSiteObj(obj_typen, handle, site_key); runatsite := TRUE; rscount := rscount + 1; goto next_object; end if; end if; -- call appropriate dbms_backup_restore change function based on -- object type, passing new status. if objectType = krmiBS then %IF% catalog if (cmd = krmiRESETDBUN AND (site_key is null or site_key <> nsite_key)) then dbms_rcvcat.changeBackupSet( recid => recid, stamp => stamp, keep_options => keep_options, keep_until => keep_until_d, osite_key => site_key, nsite_key => nsite_key); end if; %ENDIF% catalog if dbnomount = 0 then sys.dbms_backup_restore.changeBackupSet( recid => recid, stamp => stamp, set_count => obj_key1, keep_options => keep_options, keep_until => keep_until); %IF% catalog else dbms_rcvcat.changeBackupSet( recid => recid, stamp => stamp, keep_options => keep_options, keep_until => keep_until_d); %ENDIF% catalog end if; if cmd = krmiKEEP then if keep_options = 0 then -- write: keep is deleted krmicd.writeMsg(8121); else -- write: keep is changed krmicd.writeMsg(8122); if keep_until > 0 then krmicd.writeMsg(6518, to_char(keep_until_d)); else krmicd.writeMsg(6519); end if; if (keep_options = KEEP_LOGS) then -- write: archive logs required to recover this -- backup will be kept krmicd.writeMsg(6521); elsif (keep_options = KEEP_NOLOGS) then -- write: archive logs required to recover this -- backup will expire when this backup expires krmicd.writeMsg(6520); elsif (keep_options != KEEP_CONSIST) then -- Say nothing for consistent, die if not consistent. krmicd.writeMsg(1005, 'INTERNAL ERROR: ' || 'unexpected keep option for Backup Set'); raise internal_error; end if; end if; else krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected cmd='|| to_char(cmd)||' for Backup Set'); raise internal_error; end if; krmicd.writeMsg(8539, to_char(obj_key2), -- set key to_char(recid), -- set recid to_char(stamp)); -- set stamp elsif objectType = krmiBPIECEX then %IF% catalog if (cmd = krmiRESETDBUN AND (site_key is null or site_key <> nsite_key)) then dbms_rcvcat.changeBackupPiece( bp_recid => recid, bp_stamp => stamp, status => new_status, set_stamp => obj_key2, set_count => obj_key1, osite_key => site_key, nsite_key => nsite_key); end if; %ENDIF% catalog if (old_status != new_status) then if dbnomount = 0 then sys.dbms_backup_restore.changeBackupPiece( recid => recid, stamp => stamp, handle => handle, set_stamp => obj_key2, set_count => obj_key1, pieceno => pieceno, status => new_status, force => force); %IF% catalog else -- Special case for delete when database is not mounted if dbnomount != 0 and new_status = 'D' and force = 0 then sys.dbms_backup_restore.changeBackupPiece( recid => recid, stamp => stamp, handle => handle, set_stamp => obj_key2, set_count => obj_key1, pieceno => pieceno, status => new_status, force => 1); end if; dbms_rcvcat.changeBackupPiece( bp_recid => recid, bp_stamp => stamp, status => new_status, set_stamp => obj_key2, set_count => obj_key1); %ENDIF% catalog end if; else deb('change', 'file '||handle|| ' not updated on repository, old_status:'||old_status|| ' same as new_status:'||new_status||',site_key:'||site_key); end if; if cmd = krmiXCHECK then if found then krmicd.writeMsg(8074, 'AVAILABLE'); else krmicd.writeMsg(8074, 'EXPIRED'); end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then if (rc_in_use = 0 or (rc_in_use != 0 and force != 0)) then krmicd.writeMsg(8073); end if; elsif
cmd = krmiAVAIL then if found then krmicd.writeMsg(6115); else krmicd.writeMsg(6486); end if; elsif cmd = krmiUAVAIL then krmicd.writeMsg(6111); elsif cmd = krmiRESETDBUN then if (site_key is null or site_key <> nsite_key) then krmicd.writeMsg(1005, 'change backup piece db_unique_name'); end if; elsif cmd = krmiUNCAT then krmicd.writeMsg(8128); end if; if (rc_in_use = 0 or (rc_in_use != 0 and force != 0)) then krmicd.writeMsg(8517, handle, to_char(recid), to_char(stamp)); ocount := ocount + 1; end if; elsif objectType = krmiPC then %IF% catalog if (cmd = krmiRESETDBUN AND (site_key is null or site_key <> nsite_key)) then dbms_rcvcat.changeProxyCopy( pc_recid => recid, pc_stamp => stamp, status => new_status, keep_options => keep_options, keep_until => keep_until_d, osite_key => site_key, nsite_key => nsite_key); end if; %ENDIF% catalog if (old_status != new_status) then if dbnomount = 0 then sys.dbms_backup_restore.proxyChange( recid => recid, stamp => stamp, handle => handle, status => new_status, keep_options => keep_options, keep_until => keep_until, force => force); %IF% catalog else dbms_rcvcat.changeProxyCopy( pc_recid => recid, pc_stamp => stamp, status => new_status, keep_options => keep_options, keep_until => keep_until_d); %ENDIF% catalog end if; else deb('change', 'file '||handle|| ' not updated on repository, old_status:'||old_status|| ' same as new_status:'||new_status||',site_key:'||site_key); end if; if cmd = krmiXCHECK then if found then krmicd.writeMsg(6450, 'AVAILABLE'); else krmicd.writeMsg(6450, 'EXPIRED'); end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then krmicd.writeMsg(6449); elsif cmd = krmiAVAIL then if found then krmicd.writeMsg(6447); else krmicd.writeMsg(6487); end if; elsif cmd = krmiUAVAIL then krmicd.writeMsg(6446); elsif cmd = krmiRESETDBUN then if (site_key is null or site_key <> nsite_key) then krmicd.writeMsg(1005, 'change proxy copy db_unique_name'); end if; elsif cmd = krmiUNCAT then krmicd.writeMsg(6448); elsif cmd = krmiKEEP then if keep_options = 0 then -- write: keep is deleted krmicd.writeMsg(8125); else -- write: keep is changed krmicd.writeMsg(8126); if keep_until > 0 then krmicd.writeMsg(6518, to_char(keep_until_d)); else krmicd.writeMsg(6519); end if; if keep_options = KEEP_LOGS then -- write: archive logs required to recover this -- backup will be kept krmicd.writeMsg(6521); elsif keep_options = KEEP_NOLOGS then -- write: archive logs required to recover this -- backup will expire when this backup expires krmicd.writeMsg(6520); elsif keep_options = KEEP_CONSIST then krmicd.writeMsg(1005, 'INTERNAL ERROR: ' || 'unexpected keep option for Proxy Copy'); raise internal_error; end if; end if; else krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected cmd='|| to_char(cmd)||' for Proxy Copy'); raise internal_error; end if; krmicd.writeMsg(6451, handle, to_char(recid), to_char(stamp)); ocount := ocount + 1; elsif objectType = krmiDC then %IF% catalog if (cmd = krmiRESETDBUN AND (site_key is null or site_key <> nsite_key)) then dbms_rcvcat.changeDataFileCopy( cdf_recid => recid, cdf_stamp => stamp, status => new_status, keep_options => keep_options, keep_until => keep_until_d, osite_key => site_key, nsite_key => nsite_key); end if; %ENDIF% catalog if (old_status != new_status) then if dbnomount = 0 then sys.dbms_backup_restore.changeDataFileCopy( recid => recid, stamp => stamp, fname => handle, dfnumber => obj_key1, resetlogs_change => rlscn, creation_change => crescn, checkpoint_change => ckpscn, blksize => blksize, new_status => new_status,
keep_options => keep_options, keep_until => keep_until, force => force); %IF% catalog else dbms_rcvcat.changeDataFileCopy( cdf_recid => recid, cdf_stamp => stamp, status => new_status, keep_options => keep_options, keep_until => keep_until_d); %ENDIF% catalog end if; else deb('change', 'file '||handle|| ' not updated on repository, old_status:'||old_status|| ' same as new_status:'||new_status||',site_key:'||site_key); end if; if cmd = krmiXCHECK then if found then if obj_key1 = 0 then krmicd.writeMsg(6156); else krmicd.writeMsg(6154); end if; else if obj_key1 = 0 then krmicd.writeMsg(6155); else krmicd.writeMsg(6153); end if; end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then if obj_key1 = 0 then krmicd.writeMsg(8072); else krmicd.writeMsg(8070); end if; elsif cmd = krmiAVAIL then if obj_key1 = 0 then if found then krmicd.writeMsg(6114); else krmicd.writeMsg(6484); end if; else if found then krmicd.writeMsg(6112); else krmicd.writeMsg(6483); end if; end if; elsif cmd = krmiUAVAIL then if obj_key1 = 0 then krmicd.writeMsg(6110); else krmicd.writeMsg(6108); end if; elsif cmd = krmiRESETDBUN then if (site_key is null or site_key <> nsite_key) then if obj_key1 = 0 then krmicd.writeMsg(1005, 'change control file copy db_unique_name'); else krmicd.writeMsg(1005, 'change datafile copy db_unique_name'); end if; end if; elsif cmd = krmiUNCAT then if obj_key1 = 0 then krmicd.writeMsg(6121); else krmicd.writeMsg(6119); end if; elsif cmd = krmiKEEP then if keep_options = 0 then -- write: keep is deleted krmicd.writeMsg(8123); else -- write: keep is changed krmicd.writeMsg(8124); if keep_until > 0 then krmicd.writeMsg(6512, to_char(keep_until_d)); else krmicd.writeMsg(6513); end if; if keep_options = KEEP_LOGS then -- write: archive logs required to recover this -- copy will be kept krmicd.writeMsg(6515); elsif keep_options = KEEP_NOLOGS then -- write: archive logs required to recover this -- copy will expire when this backup expires krmicd.writeMsg(6514); elsif keep_options = KEEP_CONSIST then krmicd.writeMsg(1005, 'INTERNAL ERROR: ' || 'unexpected keep option for Copy'); raise internal_error; end if; end if; else krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected cmd='|| to_char(cmd)||' for Copy'); raise internal_error; end if; if obj_key1 = 0 then krmicd.writeMsg(8516, handle, to_char(recid), to_char(stamp)); else krmicd.writeMsg(8513, handle, to_char(recid), to_char(stamp)); end if; ocount := ocount + 1; elsif objectType = krmiRAL then %IF% catalog if (cmd = krmiRESETDBUN AND (site_key is null or site_key <> nsite_key)) then dbms_rcvcat.changeArchivedLog( al_recid => recid, al_stamp => stamp, status => new_status, osite_key => site_key, nsite_key => nsite_key); end if; %ENDIF% catalog if (old_status != new_status) then if dbnomount = 0 then sys.dbms_backup_restore.changeArchivedLog( recid => recid, stamp => stamp, fname => handle, thread => obj_key1, sequence => obj_key2, resetlogs_change => rlscn, first_change => crescn, blksize => blksize, new_status => new_status, force => force, foreignal => foreignal); %IF% catalog else if (foreignal != 0) then krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected database status'|| ' for Foreign Archivelog'); raise internal_error; end if; dbms_rcvcat.changeArchivedLog( al_recid => recid, al_stamp => stamp, osite_key => site_key, status => new_status); %ENDIF% catalog end if; else deb('change', 'file '||handle|| ' not updated on repository, old_status:'||old_status|| ', same as new_status:'||new_status||',site_key:'||site_key); end if; if (foreignal = 0) then if
cmd = krmiXCHECK then if found then krmicd.writeMsg(6158); else krmicd.writeMsg(6157); end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then krmicd.writeMsg(6406); elsif cmd = krmiAVAIL then if found then krmicd.writeMsg(6113); else krmicd.writeMsg(6485); end if; elsif cmd = krmiUAVAIL then krmicd.writeMsg(6109); elsif cmd = krmiRESETDBUN then if (site_key is null or site_key <> nsite_key) then krmicd.writeMsg(1005, 'change archived log db_unique_name'); end if; elsif cmd = krmiUNCAT then krmicd.writeMsg(6120); end if; krmicd.writeMsg(8514, handle, to_char(recid), to_char(stamp)); else -- foreign archived log if cmd = krmiXCHECK then if found then krmicd.writeMsg(8618); else krmicd.writeMsg(8617); end if; elsif cmd in (krmiDEXPIRED, krmiDELETE) then krmicd.writeMsg(8621); elsif cmd = krmiAVAIL then if found then krmicd.writeMsg(8622); else krmicd.writeMsg(8623); end if; elsif cmd in (krmiUAVAIL, krmiRESETDBUN) then krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected cmd='|| to_char(cmd)||' for Foreign Archivelog'); raise internal_error; elsif cmd = krmiUNCAT then krmicd.writeMsg(8620); end if; krmicd.writeMsg(8619, handle, to_char(recid), to_char(stamp)); end if; ocount := ocount + 1; else krmicd.writeMsg(1005, 'INTERNAL ERROR: unexpected objectType='|| to_char(objectType)); raise internal_error; end if; end if; <<next_object>> null; end loop; if ocount > 0 then if cmd = krmiRESETDBUN then krmicd.writeMsg(1005, 'Changed ' || to_char(ocount) || ' objects db_unique_name'); else krmicd.writeMsg(cmdmsg, to_char(ocount)); end if; if ((not anymiss) and implicit = 0) then krmicd.writeMsg(0); end if; if (cmd in (krmiDELETE, krmiDEXPIRED, krmiUNCAT)) then sys.dbms_backup_restore.cleanupBackupRecords; end if; end if; if anymiss then -- Output mismatched records krmicd.writeMsg(0); krmicd.writeMsg(6207, mcount, nvl(devicetype, ' ')); krmicd.writeMsg(6208); krmicd.listMismatch; krmicd.writeMsg(0); end if; if runatsite then -- Output records that must run at owner site krmicd.writeMsg(0); krmicd.writeMsg(6216, rscount); krmicd.listRunAtSite; krmicd.writeMsg(0); end if; exited('change', 'OK'); end; >>> # # change_failure: change failure # define change_failure <<< -- change_failure declare failureList sys.dbms_ir.ir_failure_list_type; errorList sys.dbms_ir.ir_error_list_type; cmdmsg number; failureId number; firstcall binary_integer; &constants& internal_error exception; pragma exception_init(internal_error, -600); begin entered('change_failure'); deb('change_failure', 'started, command: '|| cmd); select decode(cmd, krmiHIGH, 7207, krmiLOW, 7208, krmiCLOSED, 7209) into cmdmsg from x$dual; firstcall := 1; loop exit when not krmicd.failureGetNext(firstcall, failureId); firstcall := 0; failureList(failureList.count + 1) := failureId; deb('change_failure', 'added failureId = ' || failureId); end loop; if (failureList.count = 0) then raise internal_error; end if; if (cmd = krmiCLOSED) then sys.dbms_ir.closeFailures( failureList => failureList ,errorList => errorList); elsif (cmd = krmiHIGH) then sys.dbms_ir.changePriority( failureList => failureList ,newPriority => sys.dbms_ir.IR_FAILURE_HIGH ,errorList => errorList); elsif (cmd = krmiLOW) then sys.dbms_ir.changePriority( failureList => failureList ,newPriority => sys.dbms_ir.IR_FAILURE_LOW ,errorList => errorList); else raise internal_error; end if; for i in 1..errorList.count loop if (abs(errorList(i).errorCode) = 51102) then krmicd.writeMsg(8062, to_char(errorList(i).failureID)); else krmicd.writeMsg(8061, to_char(errorList(i).failureID), abs(errorList(i).errorCode));
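-- Note: ORA-51102 means the failure no longer exists, so it gets its own
-- message (8062); any other per-failure error is reported with the failure
-- id and the error code (8061), and the final count below subtracts the
-- errored failures from the total.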
end if; end loop; krmicd.writeMsg(cmdmsg, to_char(failureList.count - errorList.count)); exited('change_failure', 'OK'); end; >>> # # catdfc: catalog a datafilecopy # define catdfc <<< -- catdfc declare fname varchar2(512); full_name varchar2(512); recid number; stamp number; rsid number; rsts number; begin &object& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.inspectDataFileCopy(fname => fname, full_name => full_name, recid => recid, stamp => stamp, &args&); krmicd.writeMsg(8050); krmicd.writeMsg(8513, full_name, to_char(recid), to_char(stamp)); sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # catalc: catalog an archivelog copy # define catalc <<< -- catalc declare fname varchar2(512); full_name varchar2(512); recid number; stamp number; rsid number; rsts number; begin &object& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.inspectArchivedLog(fname => fname, full_name => full_name, recid => recid, stamp => stamp); krmicd.writeMsg(8051); krmicd.writeMsg(8514, full_name, to_char(recid), to_char(stamp)); sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # catbcf: catalog a backup controlfile # define catbcf <<< -- catbcf declare fname varchar2(512); full_name varchar2(512); recid number; stamp number; rsid number; rsts number; begin &object& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.inspectControlFile(fname => fname, full_name => full_name, recid => recid, stamp => stamp); krmicd.writeMsg(8052); krmicd.writeMsg(8516, full_name, to_char(recid), to_char(stamp)); sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # catbp: catalog a backuppiece # - reset last backupset recids (high water mark). # define catbp <<< -- catbp declare handle varchar2(512); full_handle varchar2(512); recid number; stamp number; err_num number := 0; err_msg varchar2(2048); rsid number; rsts number; db_not_mounted exception; pragma exception_init(db_not_mounted, -1507); begin &object& begin sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.inspectBackupPiece( handle => handle, full_handle => full_handle, recid => recid, stamp => stamp); exception when db_not_mounted then raise; -- consider error message as information message and continue -- cataloging other pieces in the list when others then -- Copy the error message to a local variable for manipulation. err_num := sqlcode; err_msg := sqlerrm; end; if (err_num = 0) then krmicd.writeMsg(8127); krmicd.writeMsg(8517, full_handle, to_char(recid), to_char(stamp)); else krmicd.writeErrMsg(1005, err_msg); -- record information about handle for later retrieval krmicd.mismatchObj('Backup Piece', handle, 0); krmicd.setDeferError; end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # clean: cleanup our session. This is called for each allocated channel # when all other commands/steps have terminated normally. # It is also called when a step terminates abnormally and the error # is not retry-able or the user does not want to retry. # # The first 'select count(*) from x$dual' statement here is a # workaround for bug 756629. When an error occurs on a # channel, and multiple channels are allocated, we call upibrk/upirst on # all the running channels. 
Although upirst is supposed to reset the # connection state so that the next call doesn't see the ORA-03111 error # which indicates that upibrk was called, this isn't happening, and the next # call (which was the below call to backupCancel) would get the ORA-03111. # This then results in a hung connection, because the client ttc layer # attempts to re-try the call that got ORA-03111, but the PL/SQL RPC # callback isn't designed to re-try; because the callback was already called # once, it doesn't think it needs to send the IN parameters again, so they # are not placed on the network. The server expects to see them, however, # and hangs in read() when it tries to get them from the data packet and # they are not there. A 'select' statement, on the other hand, does not # have any problems when retried, so we use an innocuous statement to # absorb the 3111 before the next RPC. Note that x$dual is a valid table # when the database is started, mounted, or open. We use krmicd.execSql # instead of just executing the statement directly here because the # delete channel doesn't call krmkllc, so the compilation of this step # will fail when the delete channel is released when the database is # not open. define clean <<< -- clean declare /* device status variables */ state binary_integer; devtype varchar2(512); name varchar2(512); bufsz binary_integer; bufcnt binary_integer; kbytes number; readrate binary_integer; parallel binary_integer; thread number; kcrmx_recs number; autochn number := 0; mr_not_started exception; pragma exception_init(mr_not_started, -1112); db_not_mounted exception; pragma exception_init(db_not_mounted, -1507); begin &object& begin krmicd.execSql('select count(*) from x$dual'); exception when others then krmicd.clearErrors; end; sys.dbms_backup_restore.backupCancel; sys.dbms_backup_restore.restoreCancel(FALSE); begin sys.dbms_backup_restore.proxyCancel; exception when others then krmicd.clearErrors; end; sys.dbms_backup_restore.cfileUseCurrent; -- release enqueue sys.dbms_backup_restore.deviceStatus(state, devtype, name, bufsz, bufcnt, kbytes, readrate, parallel); begin sys.dbms_backup_restore.bmrCancel; exception when others then krmicd.clearErrors; end; begin sys.dbms_backup_restore.flashbackCancel; exception when others then krmicd.clearErrors; end; begin if krmicd.mrCheck > 0 then krmicd.execSql('alter database recover cancel'); end if; exception when others then krmicd.clearErrors; end; -- If autochn is set to 0, then the channel is user allocated, hence can be -- deallocated. However, we will call dbms_backup_restore.deviceDeallocate -- only if server says that the device is actually allocated. On the -- other hand, we will call krmicd.clearChannelInfo even if server -- thinks that device is not allocated because it can be that -- deviceAllocate has failed. if (autochn = 0) then if (state > sys.dbms_backup_restore.NO_DEVICE) then sys.dbms_backup_restore.deviceDeallocate; krmicd.writeMsg(8031, krmicd.getChid); -- Clear the client_info field on channels which have no device -- allocated. This has the effect of leaving the client_info field -- present on the default channel. sys.dbms_backup_restore.set_client_info(''); end if; krmicd.clearChannelInfo; -- tell krmq no device here now end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # del_start: delete files # This skeleton is used by backup optimization to delete SKIPPED files # (archivelog, backupset) when 'delete input' option is specified # in 'backup' command.
# define del_start <<< -- del_start declare first_time boolean := TRUE; arch_recid number; arch_stamp number; fname varchar2(512); thread number; sequence number; resetlogs_change number; resetlogs_time varchar2(512); first_change number; next_change number; blksize number; handle varchar2(512); recid number; stamp number; cfisstby boolean := FALSE; chtype varchar2(16); cfauto boolean := FALSE; skipped boolean := FALSE; dfnumber number; copy_recid number; copy_stamp number; creation_change number; checkpoint_change number; no_delete binary_integer; reqscn number; -- streams/standby required scn rlgscn number; -- streams/standby resetlogs scn appscn number; apprlgscn number; alldest number := 0; docopies boolean := FALSE; reqbackups number; nbackups number; begin first_time := TRUE; -- make sure at least one statement -- if immediately appended by bu_end chtype := krmicd.getDevType; >>> # # budf_start: backup datafiles. This is the first skeleton in the step. # define budf_start <<< -- budf_start /* This must be retriable, which means a backup conversation may already * be in progress when this step (re)executes. */ declare /* backup conversation status variables */ state binary_integer; setid number; stamp number; pieceno binary_integer; files binary_integer; datafiles boolean; incremental boolean; nochecksum boolean; device boolean; hdrupd boolean := TRUE; /* piece creation variables */ done boolean; concur boolean; chg_tracking_err number; /* Miscellaneous */ memnum number; dfnumber number; cfname varchar2(512); copy_recid number; copy_stamp number; busy_retries number := 0; resetlogs_change number; creation_change number; checkpoint_change number; blksize number; blocks number; fname varchar2(1024); no_delete binary_integer; copy number; nformat number := 1; handle varchar2(512); comment varchar2(80); media varchar2(80); wrong_format exception; pragma exception_init(wrong_format, -20039); first_time boolean := TRUE; backup_level number; elapsed number; starttime date; hours number; mins number; secs number; ncopies number := 0; docompress boolean := FALSE; compressalg varchar2(80); compressasof number; compresslopt binary_integer; /* backup_type is used to indicate what type of backup is done. This is used * to get configured copies, look at krmkgbac for more comments.
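 * For this skeleton backup_type is 2 (datafile backups); the bual_start
 * skeleton below uses backup_type 1 (archived log backups).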
*/ backup_type number := 2; isstby boolean; larchlog_failover boolean; failoverdone boolean := FALSE; docopies boolean := FALSE; cnvrtto boolean := FALSE; cnvrtfr boolean := FALSE; sameen boolean := FALSE; reusefile boolean := FALSE; tsname varchar2(30) := NULL; thread number := NULL; sequence number := NULL; m number := 8581; cprecid number; cpstamp number; rsid number; rsts number; cptag varchar2(31) := NULL; noct boolean := FALSE; nooptim boolean := FALSE; dontcare varchar2(1); pltfrmto number := NULL; pltfrmfr number := NULL; foreign_dbname varchar2(8) := NULL; foreign_dbid number := NULL; doconvert boolean := FALSE; savepiecename boolean := FALSE; transontarget boolean := FALSE; transonlyundo boolean := FALSE; convertdb boolean := FALSE; processfile boolean := TRUE; isomf boolean; istmplt boolean; isasm boolean; validatecmd boolean; validateopt boolean; newcorrupt boolean; -- TRUE if new corruption is found updateok boolean; snapshot_cf boolean; /* Multi-section backup fields */ msb_secbytes number := 0; msb_file_size number; msb_set_stamp number; msb_set_count number; msb_section_size number; msb_first_section number; msb_section_count number; msb_piece_number number; msb_piece_count number; cnvrt_need_format exception; bkp_need_format exception; pragma exception_init(cnvrt_need_format, -20038); pragma exception_init(bkp_need_format, -20045); begin &1& -- set docopies and convert (if any) &2& -- set rsid &3& -- set rsts &4& -- set reusefile &5& -- set validatecmd and validateopt := FALSE/TRUE &6& -- set docompress := FALSE/TRUE &7& -- set msb_secbytes &msb_secbytes& -- set compressalg &comp_alg& if pltfrmto is not null or pltfrmfr is not null then doconvert := true; end if; -- If returned FALSE, abort the backup job if (NOT beginBackupJobStep()) then return; end if; sys.dbms_backup_restore.backupStatus(state, setid, stamp, pieceno, files, datafiles, incremental, nochecksum, device); if state = sys.dbms_backup_restore.BACKUP_NO_CONVERSATION then goto start_convo; elsif state = sys.dbms_backup_restore.BACKUP_NAMING_FILES then goto name_files; else goto create_piece; end if; <<start_convo>> sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.backupSetDatafile(stamp, setid, &args_start&, &args_tag&, backup_level => backup_level, imagcp => docopies, convertto => cnvrtto, convertfr => cnvrtfr, pltfrmto => pltfrmto, pltfrmfr => pltfrmfr, sameen => sameen, convertdb => convertdb, validate => validateopt, hdrupd => hdrupd); if hdrupd then krmicd.writeMsg(6782); krmicd.writeMsg(6785); updateok := sys.dbms_backup_restore.UpdateHeaders(); if not updateok then krmicd.writeMsg(6784); krmicd.writeMsg(8191, sys.dbms_backup_restore.getParm( sys.dbms_backup_restore.TRACE_FILENAME)); end if; krmicd.writeMsg(6783); end if; if (noct) then dontcare := sys.dbms_backup_restore.getParm(sys.dbms_backup_restore.incr_noct); end if; if (nooptim) then dontcare := sys.dbms_backup_restore.getParm(sys.dbms_backup_restore.full_nooptim); end if; -- Display appropriate message if docopies then -- convertdb messages are moved to budf_name if not convertdb then if doconvert then krmicd.writeMsg(8589, krmicd.getChid); else krmicd.writeMsg(8580, krmicd.getChid); end if; end if; else if backup_level is not null then if (docompress) then krmicd.writeMsg(8047, krmicd.getChid, to_char(backup_level)); else krmicd.writeMsg(8048, krmicd.getChid, to_char(backup_level)); end if; else if (docompress) then krmicd.writeMsg(8046, krmicd.getChid); elsif (validatecmd) then krmicd.writeMsg(8140, krmicd.getChid);
else krmicd.writeMsg(8008, krmicd.getChid); end if; end if; setBackupParams(docopies); <<name_files>> deb('budf_start', 'set_stamp=' || stamp || ' set_count=' || setid, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); >>> # # bual_start: backup archivelogs. This is the first skeleton in the step. # define bual_start <<< -- bual_start /* This must be retriable, which means a backup conversation may already * be in progress when this step (re)executes. */ declare /* backup conversation status variables */ state binary_integer; setid number; stamp number; pieceno binary_integer; files binary_integer; datafiles boolean; incremental boolean; backup_level number; nochecksum boolean; device boolean; /* piece creation variables */ done boolean; concur boolean; chg_tracking_err number; /* Miscellaneous */ memnum number; arch_recid number; arch_stamp number; fname varchar2(512); thread number; sequence number; resetlogs_change number; resetlogs_time varchar2(512); first_change number; next_change number; blksize number; blocks number; copy number; nformat number := 1; handle varchar2(512); comment varchar2(80); media varchar2(80); wrong_format exception; pragma exception_init(wrong_format, -20039); first_time boolean := TRUE; cfisstby boolean := FALSE; elapsed number; starttime date; hours number; mins number; secs number; ncopies number := 0; -- backup_type is used to indicate what type of backup is done. This is used -- to get configured copies, look at krmkgbac for more comments. backup_type number := 1; larchlog_failover boolean; failoverdone boolean := FALSE; docopies boolean := FALSE; reusefile boolean := FALSE; dfnumber number := NULL; tsname varchar2(30) := NULL; m number := 8583; cprecid number; cpstamp number; foreign_dbname varchar2(8) := NULL; foreign_dbid number := NULL; rsid number; rsts number; cptag varchar2(31) := NULL; doconvert boolean := FALSE; docompress boolean := FALSE; compressalg varchar2(80); compressasof number; compresslopt binary_integer; savepiecename boolean := FALSE; transontarget boolean := FALSE; transonlyundo boolean := FALSE; convertdb boolean := FALSE; processfile boolean := TRUE; reqscn number; -- streams/standby required scn rlgscn number; -- streams/standby resetlogs scn appscn number; alldest number := 0; apprlgscn number; reqbackups number; nbackups number; isomf boolean; istmplt boolean; isasm boolean; validatecmd boolean; validateopt boolean; newcorrupt boolean; -- TRUE if new corruption is found msb_secbytes number := 0; cnvrt_need_format exception; bkp_need_format exception; pragma exception_init(cnvrt_need_format, -20038); pragma exception_init(bkp_need_format, -20045); begin -- set docopies &2& -- set rsid &3& -- set rsts &4& -- set reusefile &5& -- set validatecmd and validateopt := FALSE/TRUE &6& -- set docompress := FALSE/TRUE &7& -- set msb_secbytes &msb_secbytes& -- set compressalg &comp_alg& -- If returned FALSE, abort the backup job if (NOT beginBackupJobStep()) then return; end if; sys.dbms_backup_restore.backupStatus(state, setid, stamp, pieceno, files, datafiles, incremental, nochecksum, device); if state = sys.dbms_backup_restore.BACKUP_NO_CONVERSATION then goto start_convo; elsif state = sys.dbms_backup_restore.BACKUP_NAMING_FILES then goto name_files; else goto create_piece; end if; <<start_convo>> sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.backupSetArchivedLog(set_stamp => stamp, set_count => setid, &args_start&, &args_tag&, imagcp => docopies, validate => validateopt); if not docopies then if
(validatecmd) then krmicd.writeMsg(8145, krmicd.getChid); elsif (docompress) then krmicd.writeMsg(8049, krmicd.getChid); else krmicd.writeMsg(8009, krmicd.getChid); end if; else krmicd.writeMsg(8582, krmicd.getChid); end if; setBackupParams(docopies); <<name_files>> >>> # # bubs_start: backup backupset. This is the first skeleton in the step. # define bubs_start <<< -- bubs_start declare set_count number; set_stamp number; recid number; stamp number; copy_recid number; copy_stamp number; isrdf boolean; pieceno number := 0; -- piece number for this step piececnt binary_integer := 0; -- count of distinct piece copies npieces binary_integer := 0; -- number of pieces in backup set type names is table of varchar2(512) index by binary_integer; type ctlid is table of number index by binary_integer; type isrdf_tab is table of boolean index by binary_integer; pnames names; precid ctlid; pstamp ctlid; pisrdf isrdf_tab; /* piece creation variables */ concur boolean; /* Miscellaneous */ fname varchar2(1024); copy number; max_copy binary_integer; ncopies number := 0; -- number of output copies to make docompress boolean := FALSE; nformat number := 1; handle varchar2(512); comment varchar2(80); media varchar2(80); elapsed number; stampd date; hours number; mins number; secs number; cfauto boolean := FALSE; skipped boolean := FALSE; -- skipped this backuppiece chtype varchar2(16); start_time date; savepiecename boolean := FALSE; transontarget boolean := FALSE; transonlyundo boolean := FALSE; convertdb boolean := FALSE; processfile boolean := TRUE; rsid number; rsts number; wrong_format exception; in_use exception; del_for_space exception; isomf boolean; istmplt boolean; isasm boolean; skip_inaccessible boolean; firstscan boolean; pragma exception_init(wrong_format, -20039); pragma exception_init(in_use, -19584); pragma exception_init(del_for_space, -19805); begin -- set_stamp, set_count and npieces &object& -- set rsid &3& -- set rsts &4& if (skip_inaccessible and krmicd.isBsInaccessible(firstscan)) then if (firstscan) then krmicd.writeMsg(8107, to_char(set_count), to_char(set_stamp)); end if; return; end if; krmicd.writeMsg(8104, krmicd.getChid, to_char(set_count), to_char(set_stamp), to_char(pieceno)); -- If returned FALSE, abort the backup job if (NOT beginBackupJobStep()) then return; end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); >>> # # copyfil_start: backup as copy file. This is the first skeleton in the step. # define copyfil_start <<< -- copyfil_start declare cpfil_unsupported exception; pragma exception_init(cpfil_unsupported, -16524); type names is table of varchar2(512) index by binary_integer; src_name varchar2(512); lformat names; nformat number := 1; netalias varchar2(1000) := NULL; worked boolean; memnum number; begin -- If returned FALSE, abort the backup job if (NOT beginBackupJobStep()) then return; end if; >>> # # budf_name: backup datafile name. This skeleton appears 1 or more # times in the step. It adds 1 datafile to the backup set.
# define budf_name <<< -- budf_name &memnum& &object& if (first_time) then if validatecmd then krmicd.writeMsg(8141, krmicd.getChid); elsif not docopies then krmicd.writeMsg(8010, krmicd.getChid); end if; first_time := FALSE; end if; if files < memnum then begin sys.dbms_backup_restore.backupDataFile(dfnumber => dfnumber, &args&); if convertdb then if transonlyundo then processfile := krmicd.isFileUndo(dfnumber); end if; if processfile then if transontarget then krmicd.writeMsg(8305, krmicd.getChid); else krmicd.writeMsg(8589, krmicd.getChid); end if; end if; end if; if processfile then krmicd.writeMsg(8522, to_char(dfnumber, 'FM09999'), fname); deb('budf_name', 'blocks=' || blocks || ' block_size=' || blksize, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); end if; exception when sys.dbms_backup_restore.inc_scn_matches_df_scn then krmicd.writeMsg(8522, to_char(dfnumber, 'FM09999'), fname); krmicd.writeMsg(8056, to_char(dfnumber, 'FM09999')); krmicd.clearErrors; end; files := files + 1; end if; >>> # # budc_name: backup datafilecopy name. # define budc_name <<< -- budc_name &memnum& &object& name_datafilecopy(memnum, copy_recid, copy_stamp, fname, dfnumber, blocks, blksize, tsname, files, docopies, &args&); >>> # # bucv_name: convert file name # define bucv_name <<< -- bucv_name &memnum& &object& if files < memnum then sys.dbms_backup_restore.convertDataFileCopy(fname, &args&); files := files + 1; krmicd.writeMsg(8506, fname); end if; >>> # # busp_name: backup SPFILE name. Appears at most one time in a step. # It adds the current SPFILE to the backup. # define busp_name <<< -- busp_name &memnum& &object& if (first_time) then if validatecmd then krmicd.writeMsg(8141, krmicd.getChid); elsif not docopies then krmicd.writeMsg(8010, krmicd.getChid); end if; first_time := FALSE; end if; sys.dbms_backup_restore.backupSpFile; krmicd.writeMsg(8113); >>> # # bucf_name: backup controlfile name. Appears at most 1 time in a step. # It adds a specified backup controlfile name to the backup set, or # the snapshot controlfile if cfname is null. # define bucf_name <<< -- bucf_name isstby := FALSE; dfnumber := 0; &memnum& &object& if (first_time) then if validatecmd then krmicd.writeMsg(8141, krmicd.getChid); elsif not docopies then krmicd.writeMsg(8010, krmicd.getChid); end if; first_time := FALSE; end if; if (cfname is null and not validateopt and not docopies) then snapshot_cf := TRUE; else snapshot_cf := FALSE; end if; if files < memnum then -- Refresh the snapshot controlfile so that it is reasonably current -- before backing it up. This is necessary because it is possible -- that the snapshot controlfile SCN is zero, indicating that its -- contents are not valid. <<snapshot>> -- retry on failure to get snapshot enqueue begin -- backup current/ for a standby controlfile if snapshot_cf then sys.dbms_backup_restore.cfileMakeAndUseSnapshot(isstby); sys.dbms_backup_restore.cfileUseCurrent; end if; sys.dbms_backup_restore.backupControlFile(cfname => cfname, isstby => isstby, snapshot_cf => snapshot_cf); exception when sys.dbms_backup_restore.snapshot_enqueue_busy then -- retry up to 180 times, waiting 20 seconds between retries, -- thus attempting to get a snapshot control file for an hour. -- Since we are displaying RMAN-08512 message, user will know -- that RMAN is waiting to get snapshot enqueue.
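-- Illustrative sketch (not part of the skeleton): the shape of the
-- snapshot-enqueue retry policy used below -- up to 180 attempts, 20 seconds
-- apart (an hour in total), with a progress message every 15th attempt
-- (every 5 minutes). try_enqueue is a hypothetical stand-in; only the
-- counter handling mirrors the code below.
declare
  tries number := 0;
  function try_enqueue return boolean is
  begin
    return tries >= 3;  -- pretend the 4th attempt succeeds
  end;
begin
  while not try_enqueue loop
    if tries = 180 then
      raise_application_error(-20029, 'cannot make a snapshot controlfile');
    end if;
    tries := tries + 1;
    if mod(tries, 15) = 0 then
      dbms_output.put_line('still waiting for the snapshot controlfile enqueue');
    end if;
    -- the skeleton sleeps 20 seconds here via krmicd.sleep(20)
  end loop;
  dbms_output.put_line('enqueue obtained after '||tries||' retries');
end;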
if busy_retries = 180 then krmicd.writeMsg(20029, 'cannot make a snapshot controlfile'); raise; end if; busy_retries := busy_retries + 1; -- print this message every 5 minutes if (mod(busy_retries, 15) = 0) then krmicd.writeMsg(8512); end if; krmicd.sleep(20); krmicd.clearErrors; goto snapshot; end; -- snapshot controlfile stuff files := files + 1; if cfname is null then if validatecmd then if isstby then krmicd.writeMsg(8142); else krmicd.writeMsg(8143); end if; elsif not docopies then if isstby then krmicd.writeMsg(8020); else krmicd.writeMsg(8011); end if; else if isstby then krmicd.writeMsg(8585); else krmicd.writeMsg(8584); end if; end if; else krmicd.writeMsg(8524, cfname); deb('bucf_name', 'blocks=' || blocks || ' block_size=' || blksize, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); end if; end if; >>> # # bual_name: backup archived log name. # define bual_name <<< -- bual_name name_log(&memnum&, &object&, files=>files, first_time => first_time, docopies => docopies, validatecmd => validatecmd); &1& >>> # # bubp_name: backup backuppiece name. # define bubp_name <<< -- bubp_name &object& -- pnames contains all copies of pieces. pnames(piececnt) := fname; precid(piececnt) := copy_recid; pstamp(piececnt) := copy_stamp; pisrdf(piececnt) := isrdf; piececnt := piececnt + 1; >>> # # copyfil_name: backup as copy file. This skeleton appears 1 time # in the step. It copies 1 file. # define copyfil_name <<< -- copyfil_name &memnum& &object& >>> # # msb: multi-section backup # define msb <<< -- msb if krmicd.firstMSB then sys.dbms_backup_restore.initMSB(dfnumber, msb_file_size, msb_set_stamp, msb_set_count); krmicd.initMSB(msb_file_size, blksize, msb_secbytes, msb_set_stamp, msb_set_count); end if; krmicd.getMSB(msb_section_size, msb_first_section, msb_section_count, msb_set_stamp, msb_set_count, msb_piece_number, msb_piece_count); sys.dbms_backup_restore.setMSB(dfnumber, msb_section_size, msb_first_section, msb_section_count, msb_set_stamp, msb_set_count, msb_piece_number, msb_piece_count); if (validatecmd) then krmicd.writeMsg(8616, msb_section_size * msb_first_section + 1, least(blocks, (msb_section_size * msb_first_section) + (msb_section_size * msb_section_count))); else krmicd.writeMsg(8525, msb_section_size * msb_first_section + 1, least(blocks, (msb_section_size * msb_first_section) + (msb_section_size * msb_section_count))); end if; >>> # # bu_create: backup piece create. This skeleton appears # exactly once. It loops until all pieces have been created. # define bu_create <<< -- bu_create <<create_piece>> sys.dbms_backup_restore.backupStatus(state, setid, stamp, pieceno, files, datafiles, incremental, nochecksum, device); starttime := sysdate; -- If we get here and the files count is zero, then all files in this -- backup set were skipped.
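-- Illustrative sketch (not part of the skeleton): the block-range arithmetic
-- the msb skeleton above uses when it reports a multi-section backup.
-- All values are hypothetical; sections are sized in blocks and the last
-- section is clipped to the file size with least().
declare
  f_blocks  number := 12800;  -- hypothetical datafile size in blocks
  sec_size  number := 1280;   -- hypothetical blocks per section
  first_sec number := 9;      -- 0-based index of the first section here
  sec_count number := 2;      -- sections handled by this channel
  first_blk number;
  last_blk  number;
begin
  first_blk := sec_size * first_sec + 1;
  last_blk  := least(f_blocks, (sec_size * first_sec) + (sec_size * sec_count));
  dbms_output.put_line('backing up blocks '||first_blk||' through '||last_blk);
end;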
if (files = 0) then sys.dbms_backup_restore.backupCancel; krmicd.writeMsg(8057, krmicd.getChid); else -- ncopies will be substituted for ncopies := &ncopies& if (ncopies = 0) then ncopies := krmicd.getBackupCopies(backup_type, krmicd.getDevType); end if; sys.dbms_backup_restore.setLimit(sys.dbms_backup_restore.dupcnt, ncopies); loop if not docopies then krmicd.writeMsg(8038, krmicd.getChid, to_char(pieceno+1), to_char(sysdate)); end if; declare type names is table of varchar2(512) index by binary_integer; fnames names; lformat names; lyear varchar2(4); lday varchar2(2); lmonth varchar2(2); copyaux number; chtype varchar2(16); busy_retries number := 0; piecefmt varchar2(512); orig_fname varchar2(512); set_stamp number; set_count number; deffmt binary_integer; dest binary_integer := 0; netalias varchar2(1000) := NULL; begin <<snapshot>> begin select to_char(sysdate, 'YYYY', 'NLS_CALENDAR=Gregorian'), to_char(sysdate, 'MM', 'NLS_CALENDAR=Gregorian'), to_char(sysdate, 'DD', 'NLS_CALENDAR=Gregorian') into lyear, lmonth, lday from x$dual; -- initialize the format lformat(0) := NULL; -- The following are substituted for variable := value -- format will be substituted by an array of -- lformat(n) := 'format' , nformat := number-of-formats -- and dest &lformat& -- Call genPieceName as many times as ncopies, storing -- generated names into the fnames array. Check the -- array for duplicated names copy := 0; while copy < ncopies loop begin -- find out the format to use (backup or channel format) piecefmt := lformat(mod(copy, nformat)); krmicd.getFormat(format => piecefmt, copy => copy+1, deffmt => deffmt, dest => dest); -- If doing image copies and the channel is not a DISK, -- then generate a piece name for DISK type because server -- implicitly creates a disk channel. chtype := krmicd.getDevType; if chtype is null then chtype := 'N/A'; elsif (docopies and chtype != 'DISK') then chtype := 'DISK'; end if; &savepiecename& if docopies then orig_fname := fname; else orig_fname := NULL; end if; fnames(copy) := sys.dbms_backup_restore.genPieceName( pno => pieceno+1, set_count => setid, set_stamp => stamp, format => piecefmt, copyno => copy+1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => foreign_dbid, ndbname => foreign_dbname, cfseq => NULL, -- not used fileno => dfnumber, tsname => tsname, logseq => to_char(sequence), logthr => thread, imagcp => docopies, savepname => savepiecename, fname => orig_fname, forcnvrt => processfile); -- check if the format specified is an ASM diskgroup istmplt := FALSE; if (chtype = 'DISK') then sys.dbms_backup_restore.isfileNameOMF( fname => fnames(copy), isomf => isomf, isasm => isasm, istmplt => istmplt); if deffmt != 0 then if (doconvert) then raise cnvrt_need_format; elsif (incremental and backup_level is null) then raise bkp_need_format; end if; end if; end if; -- check previous names for collision -- allow OMF template copyaux := 0; while (not istmplt and dest = 0 and copyaux < copy) loop if fnames(copy) = fnames(copyaux) then raise wrong_format; end if; copyaux := copyaux + 1; end loop; -- Store the names of the second copy onwards if (copy > 0) then sys.dbms_backup_restore.backupPieceCrtDupSet(copy, fnames(copy)); end if; end; copy := copy + 1; end loop; -- OK, now do the actual piece creation; we use the 'old' -- interface copy := 0; -- Do not create a backup piece unless we are processing this -- file (i.e. no skip unnecessary files) and we are not -- converting database on target.
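-- Illustrative sketch (not part of the skeleton): how the loop above cycles
-- through the configured formats with mod(copy, nformat) and rejects
-- duplexed copies that would land on the same name. The format strings are
-- hypothetical.
declare
  type name_tab is table of varchar2(512) index by binary_integer;
  l_formats name_tab;
  l_names   name_tab;
  l_nformat number := 2;
  l_ncopies number := 3;
  name_collision exception;
begin
  l_formats(0) := '/backup1/piece_%U';  -- hypothetical formats
  l_formats(1) := '/backup2/piece_%U';
  for cp in 0 .. l_ncopies - 1 loop
    -- each copy reuses the formats round-robin, as in the skeleton
    l_names(cp) := l_formats(mod(cp, l_nformat)) || '_' || cp;
    for aux in 0 .. cp - 1 loop
      if l_names(cp) = l_names(aux) then
        raise name_collision;  -- duplexed copies must not collide
      end if;
    end loop;
  end loop;
  dbms_output.put_line(l_ncopies||' distinct piece names generated');
end;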
if processfile and not transontarget then sys.dbms_backup_restore.backupPieceCreate( fname => fnames(copy), pieceno => pieceno, done => done, handle => handle, comment => comment, media => media, concur => concur, &args_create&, reuse => reusefile, archlog_failover => larchlog_failover, deffmt => deffmt, recid => cprecid, stamp => cpstamp, tag => cptag, dest => dest, post10_2 => TRUE, netalias => netalias, docompress => docompress, compressalg => compressalg, compressasof => compressasof, compresslopt => compresslopt); -- Needed for DUPLICATE DATABASE with no backup krmicd.fileRestored(ftype => rman_constant.DATAFILE, fno => nvl(dfnumber, 0), thread => 0, sequence => 0, resetscn => 0, resetstamp => 0, fname => handle); -- The post10_2 flag indicates that this RMAN client is from -- version 10.2 or newer. The backup piece context is deleted -- in backupCancel() from this version onwards. sys.dbms_backup_restore.pieceContextGetNumber(sys.dbms_backup_restore.signal_change_tracking_error, chg_tracking_err); if chg_tracking_err = 1 then krmicd.writeMsg(8606); end if; else done := TRUE; end if; if larchlog_failover then failoverdone := TRUE; end if; if concur then krmicd.writeMsg(8135, fname); end if; if done then sys.dbms_backup_restore.backupCancel(); end if; exception when sys.dbms_backup_restore.snapshot_enqueue_busy then -- retry up to 180 times, waiting 20 seconds between retries, -- thus attempting to get a snapshot control file for an hour. -- Since we are displaying the RMAN-08512 message, the user will know -- that RMAN is waiting to get the snapshot enqueue. if busy_retries = 180 then krmicd.writeMsg(20029, 'cannot make a snapshot controlfile'); sys.dbms_backup_restore.backupCancel(); raise; end if; busy_retries := busy_retries + 1; -- print this message every 5 minutes if (mod(busy_retries, 15) = 0) then krmicd.writeMsg(8512); end if; krmicd.sleep(20); krmicd.clearErrors; goto snapshot; end; krmicd.writeIOs(stamp, setid); end; -- snapshot controlfile stuff if not docopies then if ncopies = 1 then krmicd.writeMsg(8044, krmicd.getChid, to_char(pieceno), to_char(sysdate)); else -- If we have multiple copies, all copies get the same -- tag; print the tag at the beginning if we have one. if cptag is not null then krmicd.writeMsg(8053, krmicd.getChid, to_char(pieceno), to_char(sysdate), to_char(ncopies), cptag); else krmicd.writeMsg(8045, krmicd.getChid, to_char(pieceno), to_char(sysdate), to_char(ncopies)); end if; end if; end if; copy := 0; -- No messages unless the file is being processed -- (i.e. no skip unnecessary files) and we are not -- converting database on target. if processfile and not transontarget then while copy < ncopies loop if (copy > 0) then -- Retrieve output parameters from server structures sys.dbms_backup_restore.backupPieceCrtDupGet(copy, handle, comment, media); end if; if not docopies then if comment is null then comment := 'NONE'; end if; -- If we have only one copy and a tag, print the tag -- next to the handle. If we have more than one piece, -- print the handle and comment only; the same if we do -- not have a tag.
if ncopies = 1 and cptag is not null then krmicd.writeMsg(8530, handle, cptag, comment); else krmicd.writeMsg(8503, handle, comment); end if; else if doconvert then krmicd.writeMsg(8588, handle); else if cptag is NOT NULL then if cprecid = 0 and cpstamp = 0 then krmicd.writeMsg(8592, handle, cptag); else krmicd.writeMsg(8586, handle, cptag, to_char(cprecid), to_char(cpstamp)); end if; else krmicd.writeMsg(8501, handle, to_char(cprecid), to_char(cpstamp)); end if; end if; end if; copy := copy + 1; end loop; end if; if done then select sysdate-starttime into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); if failoverdone then krmicd.writemsg(8112, krmicd.getChid); end if; if not docopies then m := 8540; elsif processfile then if transontarget then m := 8306; elsif doconvert then m := 8590; end if; end if; if processfile then krmicd.writemsg(m, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end if; exit; end if; end loop; end if; first_time := TRUE; -- in case we will be deleting what we backed up >>> # # bu_validate: backup validate. This appears instead of bu_create when # 'backup validate' is used. # define bu_validate <<< -- bu_validate <> sys.dbms_backup_restore.backupStatus(state, setid, stamp, pieceno, files, datafiles, incremental, nochecksum, device); starttime := sysdate; -- If we get here and the files count is zero, then all files in this -- backup set were skipped. if (files = 0) then sys.dbms_backup_restore.backupCancel; krmicd.writeMsg(8057, krmicd.getChid); else declare busy_retries number := 0; m number; blkstat sys.dbms_backup_restore.blockStatTable_t; blkRangeTable sys.dbms_backup_restore.blockRangeTable_t; blkRange sys.dbms_backup_restore.blockRange_t; firstcall boolean := TRUE; begin loop exit when not krmicd.validateBlockGetNext( firstcall => firstcall, dfnumber => blkRange.dfnumber, blknumber => blkRange.blknumber, range => blkRange.range); blkRangeTable(blkRangeTable.count + 1) := blkRange; firstcall := FALSE; deb('bu_validate', 'dfnumber = ' || blkRange.dfnumber || ' blknumber = ' || blkRange.blknumber || ' count = ' || blkRange.range); end loop; sys.dbms_backup_restore.validateBlock(blkRangeTable); <<snapshot>> newcorrupt := FALSE; begin sys.dbms_backup_restore.backupValidate( archlog_failover => larchlog_failover, nocleanup => TRUE); if larchlog_failover then failoverdone := TRUE; end if; sys.dbms_backup_restore.getBlockStat(blkstat); for i in 1..blkstat.count loop krmicd.copyBlockStat( filetype => blkstat(i).filetype ,dfnumber => blkstat(i).dfnumber ,thread => blkstat(i).thread ,sequence => blkstat(i).sequence ,highscn => blkstat(i).highscn ,examined => blkstat(i).examined ,corrupt => blkstat(i).corrupt ,empty => blkstat(i).empty ,data_proc => blkstat(i).data_proc ,data_fail => blkstat(i).data_fail ,index_proc => blkstat(i).index_proc ,index_fail => blkstat(i).index_fail ,other_proc => blkstat(i).other_proc ,other_fail => blkstat(i).other_fail); if (blkstat(i).data_fail > 0 or blkstat(i).index_fail > 0 or blkstat(i).other_fail > 0) then newcorrupt := TRUE; end if; end loop; sys.dbms_backup_restore.backupCancel(); exception when sys.dbms_backup_restore.snapshot_enqueue_busy then -- retry up to 180 times, waiting 20 seconds between retries, -- thus attempting to get a snapshot control file for an hour. -- Since we are displaying the RMAN-08512 message, the user will know -- that RMAN is waiting to get the snapshot enqueue.
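-- (added note) the arithmetic of this retry loop: 180 retries x 20 seconds
-- of sleep = 3600 seconds, i.e. one hour in total; mod(busy_retries, 15) = 0
-- fires on every 15th retry, i.e. every 15 x 20s = 300s = 5 minutes.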
if busy_retries = 180 then krmicd.writeMsg(20029, 'cannot make a snapshot controlfile'); raise; end if; busy_retries := busy_retries + 1; -- print this message every 5 minutes if (mod(busy_retries, 15) = 0) then krmicd.writeMsg(8512); end if; krmicd.sleep(20); krmicd.clearErrors; goto snapshot; end; end; -- <> select sysdate-starttime into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); if failoverdone then krmicd.writemsg(8112, krmicd.getChid); end if; if validatecmd then m := 8144; elsif not docopies then m := 8540; else if doconvert then m := 8590; else m := 8581; end if; end if; krmicd.writemsg(m, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); krmicd.listBlockStat; if (newcorrupt) then krmicd.setErrState; krmicd.writeMsg(8190); krmicd.writeMsg(8191, sys.dbms_backup_restore.getParm( sys.dbms_backup_restore.TRACE_FILENAME)); end if; end if; >>> # # bu_copy: backup piece copy. This skeleton appears # exactly once. It loops until all pieces have been copied. # define bu_copy <<< -- bu_copy select sysdate into start_time from x$dual; max_copy := krmicd.getmaxcopyno(set_stamp => set_stamp, set_count => set_count); if ( piececnt = 0 ) then krmicd.writeMsg(8105, krmicd.getChid); else krmicd.writeMsg(8038, krmicd.getChid, to_char(pieceno), to_char(sysdate)); declare type names is table of varchar2(512) index by binary_integer; copyaux number; fnames names; busy_retries number := 0; lyear varchar2(4); lday varchar2(2); lmonth varchar2(2); lformat names; piecefmt varchar2(512); piececopy number; lcfaudate varchar2(512); cfaudate date; lsequence number; deffmt binary_integer; dest binary_integer := 0; rc binary_integer; rcva_enabled boolean; begin lformat(0) := NULL; -- initialize the format -- The following are substituted for variable := value -- format will be substituted by an array of -- lformat() := '', nformat := -- and dest -- lcfaudate is set in KRMS_ISO_DATE_FORMAT format &lcfaudate& &lsequence& &ncopies& &lformat& cfaudate := to_date(lcfaudate, 'YYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=Gregorian'); select to_char(nvl(cfaudate, sysdate), 'YYYY', 'NLS_CALENDAR=Gregorian'), to_char(nvl(cfaudate, sysdate), 'MM', 'NLS_CALENDAR=Gregorian'), to_char(nvl(cfaudate, sysdate), 'DD', 'NLS_CALENDAR=Gregorian') into lyear, lmonth, lday from x$dual; if lsequence is not NULL and lsequence >=0 and lsequence <=255 then cfauto := TRUE; -- controlfile autobackup piece ncopies := 1; -- cannot make more than one copy end if; chtype := krmicd.getDevType; if chtype is null then chtype := 'N/A'; end if; -- Call genPieceName as many times as ncopies, storing the -- generated names into the fnames array.
Check the -- array for duplicate names copy := 0; while copy < ncopies loop begin -- find out the format to use (backup or channel format) piecefmt := lformat(mod(copy, nformat)); skipped := FALSE; if cfauto then krmicd.getFormat(format => piecefmt, copy => copy+1, cfauto => TRUE, deffmt => deffmt, dest => dest); else krmicd.getFormat(format => piecefmt, copy => copy+1, deffmt => deffmt, dest => dest); end if; fnames(copy) := sys.dbms_backup_restore.genPieceName( pno => pieceno, set_count => set_count, set_stamp => set_stamp, format => piecefmt, copyno => copy+max_copy+1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => null, -- computed in server if required ndbname => null, -- computed in server if required pdbname => null, -- computed in server if required cfseq => lsequence); -- check if the format specified is an ASM diskgroup istmplt := FALSE; if (chtype = 'DISK') then sys.dbms_backup_restore.isfileNameOMF( fname => fnames(copy), isomf => isomf, isasm => isasm, istmplt => istmplt); end if; -- check previous names for collision copyaux := 0; while (not istmplt and dest = 0 and copyaux < copy) loop if fnames(copy) = fnames(copyaux) then raise wrong_format; end if; copyaux := copyaux + 1; end loop; end; piececopy := 0; -- start with first copy; <<failover>> begin -- BackupBackupPiece should always validate a backup -- piece on a non-disk channel and skip it if the -- piece exists. There should not be two backups of the -- same piece in the recovery area. Thus always validate -- the piece to see if it exists, except when the piece -- is going to the recovery area. if cfauto then rcva_enabled := is_recovery_area_enabled(); if chtype != 'DISK' or not rcva_enabled or deffmt = 0 then rc := sys.dbms_backup_restore.validateBackupPiece( recid => 0, stamp => 0, handle => fnames(copy), set_stamp => set_stamp, set_count => set_count, pieceno => 0, params => NULL, hdl_isdisk => 0); if bitand(rc, sys.dbms_backup_restore.validate_file_different) = 0 then skipped := TRUE; end if; elsif rcva_enabled and deffmt != 0 then -- Piece's destination is the recovery area -- Check if source is not the recovery area if pisrdf(piececopy) then skipped := TRUE; end if; end if; end if; if (not skipped) then krmicd.writeMsg(8013, krmicd.getChid, pnames(piececopy)); sys.dbms_backup_restore.backupBackupPiece( bpname => pnames(piececopy), fname => fnames(copy), handle => handle, comment => comment, media => media, concur => concur, recid => recid, stamp => stamp, copyno => copy+max_copy, &args_start&, &args_tag&, &args_create&, &args_reuse&, deffmt => deffmt, copy_recid => precid(piececopy), copy_stamp => pstamp(piececopy), npieces => npieces, dest => dest); if comment is null then comment := 'NONE'; end if; krmicd.writeMsg(8503, handle, comment); else krmicd.writeMsg(8119, pnames(piececopy)); end if; exception when sys.dbms_backup_restore.retryable_error_exp then piececopy := piececopy + 1; if (piececopy >= piececnt) then raise; end if; krmicd.writeMsg(8110); krmicd.clearErrors; -- clear failover errors goto failover; when in_use then krmicd.writeMsg(8603, pnames(piececopy)); piececopy := piececopy + 1; if (piececopy < piececnt) then krmicd.writeMsg(8110); krmicd.clearErrors; -- clear failover errors goto failover; else krmicd.clearErrors; end if; when del_for_space then krmicd.writeMsg(8604, pnames(piececopy)); piececopy := piececopy + 1; if (piececopy < piececnt) then krmicd.writeMsg(8110); krmicd.clearErrors; -- clear failover errors goto failover; else krmicd.clearErrors; end if; end; copy := copy +
1; end loop; if ncopies = 1 then krmicd.writeMsg(8044, krmicd.getChid, to_char(pieceno), to_char(sysdate)); else krmicd.writeMsg(8045, krmicd.getChid, to_char(pieceno), to_char(sysdate), to_char(ncopies)); end if; select sysdate-start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); krmicd.writemsg(8556, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; end if; >>> # # copyfil_xfer: provide the format and transfer the file. # define copyfil_xfer <<< -- initialize the format lformat(0) := NULL; -- format will be substituted by an array of -- lformat() := '' , nformat := -- and dest &lformat& -- An AUXILIARY FORMAT must be specified; format characters are not supported. -- Use generic error. FORMAT should be enhanced before documenting. if (lformat(0) is null) then raise cpfil_unsupported; end if; worked := sys.dbms_backup_restore.networkFileTransfer( dbname => netalias, username => null, passwd => null, srcfile => src_name, destfile => lformat(0), operation => 'read'); >>> #bug-3174292 - NOTE!! NOTE!!. If you want to add code to this skeleton, #modify del_copy function. define budc_del <<< -- budc_del &object& del_copy(copy_recid, copy_stamp, fname, dfnumber, resetlogs_change, creation_change, checkpoint_change, blksize, no_delete); >>> #bug-3174292 - NOTE!! NOTE!!. If you want to add code to this skeleton #modify del_log function. define bual_del <<< -- bual_del &object& del_log(cfisstby, arch_recid, arch_stamp, fname, thread, sequence, resetlogs_change, resetlogs_time, first_change, blksize, next_change, first_time, docopies, reqscn, rlgscn, appscn, apprlgscn, alldest, reqbackups, nbackups); >>> define bubp_begin_del <<< -- bubp_begin_del declare skipped boolean := FALSE; -- skipped this backuppiece handle varchar2(1024); recid number; stamp number; begin >>> define bubp_del <<< -- bubp_del &object& -- -- backup backupset skips only autobackup pieces if such a name exists -- on this device type. Since the input files for 'backup backupset' are disk -- backups, we can safely delete the input backupsets when skipped -- on non-disk channels.
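-- (added note) the condition below, written out as a truth table; "delete"
-- means the input piece is marked via changeBackupPiece:
--
--   skipped   devtype    delete input piece?
--   -------   --------   ------------------------------------------
--   FALSE     any        yes - it was copied to the output device
--   TRUE      DISK       no  - an autobackup piece already exists
--   TRUE      non-DISK   yes - the disk input is still safe to drop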
-- if ((not skipped) or (krmicd.getDevType != 'DISK')) then sys.dbms_backup_restore.changeBackupPiece( handle => handle, recid => recid, stamp => stamp, status => 'S', &args& ); krmicd.writeMsg(8073); krmicd.writeMsg(8517, handle, to_char(recid), to_char(stamp)); end if; >>> define bubp_end_del <<< -- bubp_end_del end; >>> define bu_end <<< -- bu_end if (endBackupJobStep(FALSE, 0)) then null; end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); exception when others then if (not endBackupJobStep(TRUE, sqlcode)) then raise; end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> define switch_prim_bct <<< begin sys.dbms_backup_restore.switch_primary_bct(); end; >>> # # restoredfc: restore a datafile from a datafile copy # define restoredfc <<< -- restoredfc declare recid number; stamp number; copy_recid number; copy_stamp number; dfnumber number; fname varchar2(512) := null; -- restore dest max_corrupt number := 0; full_name varchar2(512); -- output filename copy varchar2(512); -- input filename check_logical boolean := false; byduplicate boolean := false; blksize number := 0; blocks number := 0; rfno number := 0; lowscn varchar2(41) := null; identical boolean := FALSE; force boolean := FALSE; rsid number; rsts number; tsname varchar2(512) := null; found boolean; begin &object& found := krmicd.valGetFound(copy_recid, copy_stamp); sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); krmicd.writeMsg(8019, krmicd.getChid, to_char(dfnumber, 'FM09999')); krmicd.writeMsg(8507, to_char(copy_recid), to_char(copy_stamp), copy); if fname is not null then krmicd.writeMsg(8509, to_char(dfnumber, 'FM09999'), fname); if (identical and force) then -- FORCE option was used to restore and the backup chosen was the same as -- that of the output filename. Just return and let restore failover take -- care of the restore.
krmicd.writeMsg(8552, fname); return; end if; end if; begin if byduplicate and copy is not null and fname is not null then sys.dbms_backup_restore.resDataFileCopy( cname => copy, fname => fname, full_name => full_name, max_corrupt => max_corrupt, check_logical => check_logical, blksize => blksize, blocks => blocks, fno => dfnumber, scnstr => lowscn, rfno => rfno, tsname => tsname); krmicd.writeMsg(8007, krmicd.getChid, to_char(dfnumber, 'FM09999')); krmicd.writeMsg(8505, full_name); else sys.dbms_backup_restore.copyDataFileCopy( full_name => full_name, recid => recid, stamp => stamp, fname => fname, copy_recid => copy_recid, copy_stamp => copy_stamp, max_corrupt => max_corrupt, check_logical => check_logical); krmicd.writeMsg(8007, krmicd.getChid, to_char(dfnumber, 'FM09999')); krmicd.writeMsg(8501, full_name, to_char(recid),to_char(stamp)); end if; krmicd.fileRestored(ftype => rman_constant.DATAFILE, fno => dfnumber, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0, fname => full_name); exception when sys.dbms_backup_restore.retryable_error_exp then if not krmicd.doRestoreFailover(rman_constant.DATAFILE) then raise; end if; -- display the error message as a warning and do restore failover krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; when others then raise; end; end; >>> # # restorecfc: restore a controlfile from a controlfile copy # define restorecfc <<< -- restorecfc declare src_name varchar2(512); dest_name varchar2(512); full_name varchar2(512); recid number; stamp number; currcf boolean; identical boolean := FALSE; force boolean := FALSE; rsid number; rsts number; isstby boolean := FALSE; begin &object& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); krmicd.writeMsg(8021, krmicd.getChid); if (not currcf) then krmicd.writeMsg(8505, dest_name); end if; if (identical and force) then -- FORCE option was used to restore and the backup chosen was the same as -- that of the output filename. Just return and let restore failover take -- care of the restore. krmicd.writeMsg(8552, dest_name); return; end if; begin sys.dbms_backup_restore.copyControlFile(full_name => full_name, recid => recid, stamp => stamp, src_name => src_name, dest_name => dest_name); krmicd.writeMsg(8025, krmicd.getChid); krmicd.writeMsg(8506, src_name); if (currcf) then print_controlfile; else krmicd.writeMsg(8501, full_name, to_char(recid), to_char(stamp)); end if; krmicd.fileRestored(ftype => rman_constant.CONTROLFILE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); exception when sys.dbms_backup_restore.retryable_error_exp then if not krmicd.doRestoreFailover(rman_constant.CONTROLFILE) then raise; end if; -- display the error message as a warning and do restore failover krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; when others then raise; end; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # rsdf_start: restore datafiles/controlfiles/SPFILEs. This is the first # skeleton in the step.
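#
# (illustrative sketch, commented out) rsdf_start resumes a restore
# conversation at the right point by dispatching on restoreStatus; the
# dispatch on its own, using the same calls and package constants (only
# meaningful inside an RMAN channel context):
#
#   declare
#     state binary_integer; pieces_done binary_integer; files binary_integer;
#     datafiles boolean; incremental boolean; device boolean;
#   begin
#     sys.dbms_backup_restore.restoreStatus(state, pieces_done, files,
#                                           datafiles, incremental, device);
#     if state = sys.dbms_backup_restore.restore_no_conversation then
#       null;   -- begin a new conversation   (<<start_convo>>)
#     elsif state = sys.dbms_backup_restore.restore_naming_files then
#       null;   -- continue naming files      (<<name_files>>)
#     else
#       null;   -- resume restoring pieces    (<<restore_piece>>)
#     end if;
#   end;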
# define rsdf_start <<< -- rsdf_start declare /* restoreStatus */ state binary_integer; pieces_done binary_integer; files binary_integer; datafiles boolean; incremental boolean; device boolean; /* restorebackuppiece */ done boolean; currcf boolean; fhandle varchar2(512); handle varchar2(512); outhandle varchar2(512); params varchar2(512); fromdisk boolean; -- TRUE => backupset on disk /* Miscellaneous */ memnum number; piecenum number; dfnumber number; thread number := null; sequence number := null; toname varchar2(512); tsname varchar2(512); cfname varchar2(512); pfname varchar2(512); sfname varchar2(512); set_count number; set_stamp number; first_time boolean := TRUE; validate boolean := FALSE; -- TRUE => only validate val_bs_only boolean := FALSE; -- TRUE => only bs validation -- means, no failover necessary max_corrupt binary_integer := 0; check_logical boolean := FALSE; tag varchar2(31); outtag varchar2(31); bmr boolean := FALSE; blocks number; blksize number; failover boolean := FALSE; devtype varchar2(512); rsid number; rsts number; err_msg varchar2(2048); start_time date; elapsed number; hours number; mins number; secs number; recid number; stamp number; preview boolean := FALSE; recall boolean := FALSE; isstby boolean := FALSE; msrpno number := 0; msrpct number := 0; restore_not_complete exception; begin &1& -- if this is preview command, don't do anything if preview then deb('rsdf_start', 'preview'); return; end if; sys.dbms_backup_restore.restoreStatus(state, pieces_done, files, datafiles, incremental, device); if (msrpno > 1) then -- set this to msrpno-1, not just msrpno, to offset the increment of -- pieces_done before checking for restore completion. I don't want to -- change the piece completion logic because that would require adding -- multi-section variables to all restore conversations. 
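-- (worked example) restoring section piece 3 (msrpno = 3): pieces_done is
-- primed to 2 here; when the piece completes, the piece logic increments it
-- to 3, so the completion check sees the expected count.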
pieces_done := msrpno - 1; end if; select sysdate into start_time from x$dual; if state = sys.dbms_backup_restore.restore_no_conversation then goto start_convo; elsif state = sys.dbms_backup_restore.restore_naming_files then goto name_files; else goto restore_piece; end if; <<start_convo>> sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.restoreSetDatafile( check_logical => check_logical ,cleanup => FALSE); incremental := FALSE; if bmr then krmicd.writeMsg(8106, krmicd.getChid); elsif validate then krmicd.writeMsg(8096, krmicd.getChid); else krmicd.writeMsg(8016, krmicd.getChid); end if; setRestoreParams; <<name_files>> deb('rsdf_start', 'set_stamp=' || set_stamp || ' set_count=' || set_count, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); >>> # # ridf_start: restore incremental datafiles backup set start # define ridf_start <<< -- ridf_start declare /* restoreStatus */ state binary_integer; pieces_done binary_integer; files binary_integer; datafiles boolean; incremental boolean; device boolean; /* restorebackuppiece */ done boolean; currcf boolean; fhandle varchar2(512); handle varchar2(512); outhandle varchar2(512); params varchar2(512); fromdisk boolean; -- TRUE => backupset on disk /* Miscellaneous */ memnum number; piecenum number; dfnumber number; thread number := null; sequence number := null; toname varchar2(512); cfname varchar2(512); fuzzy_hint number; set_count number; set_stamp number; first_time boolean := TRUE; validate boolean := FALSE; val_bs_only boolean := FALSE; -- TRUE => only bs validation -- means, no failover necessary max_corrupt binary_integer := 0; check_logical boolean := FALSE; tag varchar2(31); outtag varchar2(31); bmr boolean := FALSE; blocks number; blksize number; failover boolean := FALSE; devtype varchar2(512); rcvcopy boolean := FALSE; islevel0 binary_integer := 0; rsid number; rsts number; err_msg varchar2(2048); start_time date; elapsed number; hours number; mins number; secs number; recid number; stamp number; preview boolean := FALSE; recall boolean := FALSE; isstby boolean := FALSE; restore_not_complete exception; begin &1& -- if this is preview command, don't do anything if preview then deb('ridf_start', 'preview'); return; end if; sys.dbms_backup_restore.restoreStatus(state, pieces_done, files, datafiles, incremental, device); select sysdate into start_time from x$dual; if state = sys.dbms_backup_restore.restore_no_conversation then goto start_convo; elsif state = sys.dbms_backup_restore.restore_naming_files then goto name_files; else goto restore_piece; end if; <<start_convo>> sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.applySetDatafile( check_logical => check_logical ,cleanup => FALSE); incremental := TRUE; krmicd.writeMsg(8039, krmicd.getChid); setRestoreParams; <<name_files>> deb('ridf_start', 'set_stamp=' || set_stamp || ' set_count=' || set_count, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); >>> # # rsdf_name - name a datafile to be restored from a backup set # define rsdf_name <<< -- rsdf_name toname := null; max_corrupt := 0; &memnum& &object& if msrpno > 1 and not bmr then declare tempfno number; begin krmicd.getMSR(tempfno, toname); end; end if; if files < memnum then sys.dbms_backup_restore.restoreDataFileTo(dfnumber => dfnumber, toname => toname, max_corrupt => max_corrupt, tsname => tsname); if msrpno = 1 and not bmr then sys.dbms_backup_restore.initMSR(dfnumber, toname); end if; if msrpno > 0 then krmicd.setMSR(dfnumber, toname); end if; if first_time then if bmr then
krmicd.writeMsg(8108, krmicd.getChid); else krmicd.writeMsg(8089, krmicd.getChid); end if; first_time := FALSE; end if; if bmr then krmicd.writeMsg(8533, to_char(dfnumber, 'FM09999')); else if toname is not null then krmicd.writeMsg(8610, krmicd.getChid, to_char(dfnumber, 'FM09999'), toname); else krmicd.writeMsg(8610, krmicd.getChid, to_char(dfnumber, 'FM09999'), 'default location'); end if; deb('rsdf_name', 'blocks=' || blocks || ' block_size=' || blksize, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); end if; if (msrpno > 0) then krmicd.writeMsg(8555, krmicd.getChid, to_char(msrpno), to_char(msrpct)); end if; end if; >>> # # ridf_name - name a datafile to be restored from an incremental backup set # define ridf_name <<< -- ridf_name toname := null; max_corrupt := 0; &memnum& &object& if files < memnum then sys.dbms_backup_restore.applyDataFileTo(dfnumber => dfnumber, toname => toname, fuzziness_hint => fuzzy_hint, max_corrupt => max_corrupt, islevel0 => islevel0, recid => recid, stamp => stamp); if first_time then if bmr then krmicd.writeMsg(8108, krmicd.getChid); elsif rcvcopy then krmicd.writeMsg(8131, krmicd.getChid); else krmicd.writeMsg(8089, krmicd.getChid); end if; first_time := FALSE; end if; if bmr then krmicd.writeMsg(8533, to_char(dfnumber, 'FM09999')); elsif toname is not null then if rcvcopy then krmicd.writeMsg(8551, to_char(dfnumber, 'FM09999'), toname); else krmicd.writeMsg(8509, to_char(dfnumber, 'FM09999'), toname); end if; deb('ridf_name', 'blocks=' || blocks || ' block_size=' || blksize, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); end if; end if; >>> # # rscf_name - restore the controlfile in a datafile restore conversation # define rscf_name <<< -- rscf_name &object& &memnum& if files < memnum then sys.dbms_backup_restore.restoreControlfileTo(cfname => cfname, isstby => isstby); krmicd.writeMsg(8021, krmicd.getChid); if (not currcf) then krmicd.writeMsg(8505, cfname); end if; end if; >>> # # rssf_name - restore the SPFILE in a datafile restore conversation # define rssf_name <<< -- rssf_name &object& &memnum& if files < memnum then sys.dbms_backup_restore.restoreSpfileTo(pfname => pfname, sfname => sfname); if pfname is not null then krmicd.writeMsg(8114, krmicd.getChid); krmicd.writeMsg(8505, pfname); elsif sfname is not null then krmicd.writeMsg(8115, krmicd.getChid); krmicd.writeMsg(8505, sfname); else krmicd.writeMsg(8115, krmicd.getChid); krmicd.writeMsg(8116, sfname); end if; end if; >>> # # restore_piece_label - the beginning of the piece restore section # define restore_piece_label <<< -- restore_piece_label <<restore_piece>> >>> # # rspiece_init - initialize piece variables for restore # define rspiece_init <<< -- rspiece_init fhandle := NULL; &piecenum& >>> # # rspiece_name - name a piece during piece restore # define rspiece_name <<< -- rspiece_name &object& -- handle, tag, fromdisk, recid, stamp if (pieces_done+1) = piecenum then sys.dbms_backup_restore.restoreSetPiece(handle => handle, tag => tag, fromdisk => fromdisk, recid => recid, stamp => stamp); if fhandle is NULL then fhandle := handle; end if; end if; >>> # # restore_piece - restore one backup piece. Applies to all types of full # restore conversations.
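#
# (illustrative sketch, commented out) the failover shape used below, reduced
# to its core: try the piece, trap the retryable error, then either fail over
# to another backup or re-raise. The declarations are abbreviated assumptions:
#
#   declare
#     done boolean; params varchar2(512); outhandle varchar2(512);
#     outtag varchar2(31); failover boolean;
#   begin
#     sys.dbms_backup_restore.restoreBackupPiece(done => done,
#       params => params, outhandle => outhandle, outtag => outtag,
#       failover => failover);
#   exception
#     when sys.dbms_backup_restore.retryable_error_exp then
#       if not krmicd.doRestoreFailover(rman_constant.BACKUPPIECE) then
#         raise;             -- no alternative backup exists
#       end if;
#       krmicd.clearErrors;  -- else warn and try the next candidate
#   end;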
# define restore_piece <<< -- restore_piece if (pieces_done+1) = piecenum then begin if (fhandle is not NULL) then krmicd.writeMsg(8003, krmicd.getChid, fhandle); end if; sys.dbms_backup_restore.restoreBackupPiece(done => done, params => params, outhandle => outhandle, outtag => outtag, failover => failover); exception when sys.dbms_backup_restore.retryable_error_exp then err_msg := sqlerrm; if not krmicd.doRestoreFailover(rman_constant.BACKUPPIECE) then raise; end if; -- if this was a validate request for a backup set, then -- raise an error. There is no failover, and we want to report -- the error to the OS. if (val_bs_only) then raise; end if; if (not validate) then getFileRestored(FALSE); end if; devtype := krmicd.checkBsFailover; if (incremental and devtype is null) then raise; end if; -- display the error message as a warning and do restore failover krmicd.writeErrMsg(8615, krmicd.getChid||': '||err_msg); krmicd.clearErrors; if (devtype is not null) then krmicd.writeMsg(8612, krmicd.getChid, devtype); end if; goto restore_failover; when others then raise; end; if failover then krmicd.writeMsg(8614, krmicd.getChid, fhandle); krmicd.writeMsg(8613, krmicd.getChid, outhandle, nvl(outtag, 'NULL')); else krmicd.writeMsg(8611, krmicd.getChid, outhandle, nvl(outtag, 'NULL')); end if; if bmr then krmicd.writeMsg(8109, krmicd.getChid, to_char(piecenum)); else krmicd.writeMsg(8023, krmicd.getChid, to_char(piecenum)); krmicd.writeIOs(set_stamp, set_count); end if; if done then select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); if (bmr) then krmicd.writeMsg(8183, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); elsif (validate) then krmicd.writeMsg(8182, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); else krmicd.writeMsg(8180, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end if; -- print all restored controlfile names if currcf then print_controlfile; end if; if validate then -- validate backupset, restore validate cmds krmicd.fileRestored(ftype => rman_constant.BACKUPPIECE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); else getFileRestored(FALSE); end if; sys.dbms_backup_restore.restoreCancel(TRUE); return; end if; pieces_done := pieces_done + 1; end if; >>> define restore_end <<< -- restore_end krmicd.writeMsg(8001); if not krmicd.doRestoreFailover(rman_constant.BACKUPPIECE) then begin sys.dbms_backup_restore.restoreCancel(FALSE); exception when others then krmicd.writeMsg(1005, 'a. dbms_backup_restore.restoreCancel() failed'); end; raise restore_not_complete; end if; -- Restore not complete because file header in backuppiece is corrupted -- and hence couldn't find the end of restore. Anyway try a previous backup -- for full-restore always and for incrementals only when there exists a -- duplex backup on another device. if (not validate) then getFileRestored(FALSE); end if; devtype := krmicd.checkBsFailover; if (incremental and devtype is null) then begin sys.dbms_backup_restore.restoreCancel(TRUE); exception when others then krmicd.writeMsg(1005, 'b. 
dbms_backup_restore.restoreCancel() failed'); end; end; end if; if (dfnumber is not null) then krmicd.writeMsg(1005, 'Restore did not complete for some' || ' files from backup piece ' || outhandle || ' (piecenum=' || to_char(piecenum) || ', pieces_done=' || to_char(pieces_done) || ', done=' || bool2char(done) || ', failover=' || bool2char(failover) || ')'); else krmicd.writeMsg(1005, 'Restore did not complete for some' || ' archived logs from backup piece ' || outhandle || ' (piecenum=' || to_char(piecenum) || ', pieces_done=' || to_char(pieces_done) || ', done=' || bool2char(done) || ', failover=' || bool2char(failover) || ')'); end if; krmicd.writeMsg(1005, 'Please check alert log for ' || 'additional information.'); if (devtype is not null) then krmicd.writeMsg(8612, krmicd.getChid, devtype); end if; -- Try a backup previous to this one <<restore_failover>> begin sys.dbms_backup_restore.restoreCancel(FALSE); exception when others then krmicd.writeMsg(1005, 'c. dbms_backup_restore.restoreCancel() failed'); end; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> define msr_finish <<< -- msr_finish declare dfnumber number; msrfname varchar2(1024); begin krmicd.getMSR(dfnumber, msrfname); sys.dbms_backup_restore.setParms(p0 => 5, p1 => dfnumber, p5 => msrfname); end; >>> # # rsal_start: restore archived logs. This is the first skeleton in the step. # define rsal_start <<< -- rsal_start declare /* restoreStatus */ state binary_integer; pieces_done binary_integer; files binary_integer; datafiles boolean; incremental boolean; device boolean; /* restorebackuppiece */ done boolean; currcf boolean; -- unused here cfname varchar2(512); fhandle varchar2(512); handle varchar2(512); outhandle varchar2(512); params varchar2(512); fromdisk boolean; -- TRUE => backupset on disk /* Miscellaneous */ memnum number; piecenum number; dfnumber number := null; thread number; sequence number; destination varchar2(512) := null; validate boolean := FALSE; val_bs_only boolean := FALSE; -- TRUE => only bs validation -- means, no failover necessary tag varchar2(31); outtag varchar2(31); bmr boolean := FALSE; set_count number; set_stamp number; failover boolean := FALSE; devtype varchar2(512); rsid number; rsts number; err_msg varchar2(2048); start_time date; elapsed number; hours number; mins number; secs number; recid number; stamp number; savepiecename boolean := FALSE; transontarget boolean := FALSE; preview boolean := FALSE; recall boolean := FALSE; isstby boolean := FALSE; restore_not_complete exception; log_included exception; pragma exception_init(log_included, -19636); begin &1& -- if this is preview command, don't do anything if preview then deb('rsal_start', 'preview'); return; end if; sys.dbms_backup_restore.restoreStatus(state, pieces_done, files, datafiles, incremental, device); select sysdate into start_time from x$dual; if state = sys.dbms_backup_restore.restore_no_conversation then goto start_convo; elsif state = sys.dbms_backup_restore.restore_naming_files then goto name_files; else goto restore_piece; end if; <<start_convo>> &object& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.restoreSetArchivedLog( destination=>destination ,cleanup => FALSE); incremental := FALSE; if validate then krmicd.writeMsg(8097, krmicd.getChid); elsif destination is null then krmicd.writeMsg(8017, krmicd.getChid); else krmicd.writeMsg(8018, krmicd.getChid); krmicd.writeMsg(8508, destination); end if; setRestoreParams; <<name_files>> deb('rsal_start', 'set_stamp=' || set_stamp || ' set_count=' ||
set_count, rman_constant.DEBUG_IO, rman_constant.LEVEL_MIN); >>> # # rsal_name - name an archived log to be restored from a backup set # define rsal_name <<< -- rsal_name &memnum& &object& if files < memnum then begin sys.dbms_backup_restore.restoreArchivedLog(thread => thread, sequence => sequence); krmicd.writeMsg(8022, krmicd.getChid); krmicd.writeMsg(8510, to_char(thread), to_char(sequence)); %IF% target exception -- it is possible to include the same log twice due to duplicate brl -- entries (bug 6253529), hence tolerate the log-already-included -- error during restore. when log_included then null; %ENDIF% target end; end if; >>> # # switch: # define 'switch' <<< -- switch declare copy_recid number; copy_stamp number; catalog boolean; dfnumber number; fname varchar2(512); begin &object& sys.dbms_backup_restore.switchToCopy(copy_recid => copy_recid ,copy_stamp => copy_stamp ,catalog => catalog); krmicd.writeMsg(8015, to_char(dfnumber)); krmicd.writeMsg(8507, to_char(copy_recid), to_char(copy_stamp), fname); end; >>> define tdb_gtsc <<< -- tdb_gtsc /* This is to generate the transport script */ declare tscname varchar2(512); pfformat varchar2(512); rmtscname varchar2(512); pfname varchar2(512); newtscname varchar2(512); newrmtscname varchar2(512); begin &tscname& &pfformat& &rmtscname& sys.dbms_backup_restore.genTransportScript(tscname, pfformat, rmtscname, pfname, newtscname, newrmtscname, &args&); krmicd.writeMsg(8301, pfname); if rmtscname is not null then krmicd.writeMsg(8302, newrmtscname); end if; if tscname is not null then krmicd.writeMsg(8300, newtscname); end if; end; >>> define tdb_lock <<< /* This is to lock transportable db context */ declare newdbname varchar2(10); begin &newdbname& sys.dbms_backup_restore.TransportDBLock(newdbname); end; >>> define tdb_unlock <<< /* This is to unlock transportable db context */ begin sys.dbms_backup_restore.TransportDBUnlock; krmicd.writeMsg(8303); krmicd.writeMsg(8304); end; >>> # # sql: arbitrary sql # define 'sql' <<< -- sql begin krmicd.execSql(&args&); end; >>> # # # TESTING Only! # define testkrm <<< begin krmicd.writeMsg(1005, null); end; >>> # smr is divided into three parts, because the 'alter database recover' # statement may need to be repeated when ALTER DATABASE RECOVER DATAFILE LIST # is used, as part of RECOVER DATABASE.
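#
# (illustrative) stitched together, the three parts drive media recovery with
# SQL in roughly this order; the list "1, 2, 3" stands in for a hypothetical
# &args& substitution:
#
#   alter database recover datafile list clear;     -- smr1
#   alter database recover datafile list 1, 2, 3;   -- smr2 (when needed)
#   alter database recover if needed ...;           -- smr3
#   alter database recover cancel;                  -- smr3, at the until-point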
# # smr1: start media recovery - part 1 (always used) - start of block # define 'smr1' <<< -- smr1 declare mr_cancelled exception; pragma exception_init(mr_cancelled, -283); mr_need_log exception; pragma exception_init(mr_need_log, -279); mr_not_needed exception; pragma exception_init(mr_not_needed, -264); bmr_block_errors exception; pragma exception_init(bmr_block_errors, -19680); thread number; sequence number; scn number; dellog binary_integer; recid binary_integer; stamp binary_integer; seq binary_integer; resetlogs_change number; first_change number; blksize binary_integer; logname varchar2(512); stopthd number; stopseq number; toclause boolean; -- TRUE = TO CLAUSE; FALSE = TO BEFORE CLAUSE bmr boolean := FALSE; rls number; -- resetlogs scn for required log rlc number; -- resetlogs count for required log flash boolean := FALSE; stoprcv boolean; start_time date; elapsed number; hours number; mins number; secs number; begin begin &object& select sysdate into start_time from x$dual; krmicd.writeMsg(8054); -- starting media recovery krmicd.execSQL('alter database recover datafile list clear'); >>> # # smr2: start media recovery - part 2 - RECOVER DATAFILE LIST # This is added either in start media recovery or log apply step. # It is not added for BMR or flashback. define 'smr2' <<< -- smr2 krmicd.execSql('alter database recover datafile list' || '&args&'); >>> # # smr3: start media recovery - part 3 (always used) - remainder of block # define 'smr3' <<< -- smr3 if bmr then sys.dbms_backup_restore.bmrDoMediaRecovery(NULL); begin sys.dbms_backup_restore.bmrCancel; exception when bmr_block_errors then krmicd.writeMsg(8111); end; elsif flash then begin sys.dbms_backup_restore.flashbackFiles(NULL); krmicd.checkSetDatabase; exception when others then krmicd.checkSetDatabase; raise; end; sys.dbms_backup_restore.flashbackCancel; krmicd.checkSetDatabase; else krmicd.execSql('alter database recover if needed' || '&args&'); end if; select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); -- media recovery complete krmicd.writeMsg(8181, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); return; exception when mr_cancelled then krmicd.writeMsg(8059); raise; when mr_need_log then krmicd.clearErrors; when mr_not_needed then krmicd.clearErrors; select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); -- media recovery complete krmicd.writeMsg(8181, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); return; when others then krmicd.writeMsg(8059); raise; end; toclause := krmicd.checkUntil(stopthd, stopseq); select resetlogs_change# into resetlogs_change from v$database_incarnation where status='CURRENT'; select thr, seq, scn, rls, rlc into thread, sequence, scn, rls, rlc from x$kcrmx; if (resetlogs_change = rls and stopthd = thread and ((sequence >= stopseq and toclause = FALSE) OR (sequence > stopseq and toclause = TRUE))) then if bmr then begin sys.dbms_backup_restore.bmrCancel; exception when bmr_block_errors then krmicd.writeMsg(8111); end; elsif flash then sys.dbms_backup_restore.flashbackCancel; krmicd.checkSetDatabase; else krmicd.execSql('alter database recover cancel'); end if; select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); -- media recovery complete krmicd.writeMsg(8181, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); return; end if; end; >>> # # Log apply 
PL/SQL step for datafile recovery is # (log_apply1 + smr2 + log_apply3) # Log apply PL/SQL step for bmr is # (log_apply1 + log_apply3) # log_apply1: part 1 - Log apply step # The step has the capability to re-start recovery in case of ADD DATAFILE # redo. This libmem is always followed by smr2 for datafile recovery. # define 'log_apply1' <<< -- log_apply declare mr_cancelled exception; pragma exception_init(mr_cancelled, -283); -- Bug 2603837: Catch ORA-288, and issue "alter db recover continue". mr_cont_rcv exception; pragma exception_init(mr_cont_rcv, -288); mr_need_log exception; pragma exception_init(mr_need_log, -279); mr_aborted exception; pragma exception_init(mr_aborted, -20500); bmr_block_errors exception; pragma exception_init(bmr_block_errors, -19680); mr_createdf exception; pragma exception_init(mr_createdf, -20505); archivelog_backup_not_found exception; pragma exception_init(archivelog_backup_not_found, -20506); alfrec v$archived_log%ROWTYPE; dellog boolean := FALSE; -- set by rman compiler bmr boolean := FALSE; -- set by rman compiler flash boolean := FALSE; -- set by rman compiler scn number; stopthd number; stopseq number; stoprcv boolean; rlc number; -- resetlogs count resetlogs_change number; -- resetlogs SCN used when untilLog is set start_time date; elapsed number; hours number; mins number; secs number; -- following variables are introduced for create datafile. unnamed varchar2(1024); dfname varchar2(1024); newdfname varchar2(1024); fileno number := 0; recovdf boolean := false; filelist varchar2(512):=NULL; tmp number:=0; toclause boolean; tsnum number; tsname varchar2(32); bnewomf boolean; dropf boolean; createdf boolean := false; type numTab_t is table of number index by binary_integer; df_offln_list numTab_t; function continue_rcv(createdf OUT boolean) return boolean is begin createdf := false; <<do_cont_again>> begin krmicd.clearErrors; krmicd.execSql('alter database recover continue'); exception when mr_cont_rcv then goto do_cont_again; when mr_need_log then return true; when mr_createdf then createdf := true; return true; end; return false; end; begin &object& toclause := krmicd.checkUntil(stopthd, stopseq); <<restart_recovery>> -- recovery is never restarted for bmr begin select sysdate into start_time from x$dual; -- recovdf true indicates we hit a create datafile redo, and need to -- re-start the recovery session. if not bmr and not flash and recovdf then deb('apply_log', 're-start recovery'); krmicd.execSQL('alter database recover datafile list clear'); if filelist is not null then krmicd.execSql('alter database recover datafile list '||filelist); end if; >>> # log_apply3: part 2 of Log apply step for BMR, while part 3 of Log apply # step for datafile recovery. define 'log_apply3' <<< krmicd.execSql('alter database recover if needed' || '&args&'); fileno := 0; recovdf := false; end if; exception when mr_need_log then krmicd.clearErrors; end; -- fall through after re-starting recovery or at the beginning <<get_log>> if createdf then createdf := false; raise mr_createdf; end if; begin select thr, seq, scn, rls, rlc into alfrec.thread#, alfrec.sequence#, scn, alfrec.resetlogs_change#, rlc from x$kcrmx; exception when no_data_found then if bmr then begin sys.dbms_backup_restore.bmrCancel; exception when bmr_block_errors then krmicd.writeMsg(8111); end; elsif flash then sys.dbms_backup_restore.flashbackCancel; krmicd.checkSetDatabase; end if; -- presumably, the media recovery has completed -- There could be some logs which are restored but the recovery -- didn't need them, so call delete_logs once more.
-- -- If files are restored to the recovery area, then delete them even if -- the user didn't ask; that is the recovery-area behavior, needed -- to effectively manage disk space. -- delete_logs(FALSE, dellog); -- delete all remaining logs return; end; select resetlogs_change# into resetlogs_change from v$database_incarnation where status='CURRENT'; if (resetlogs_change=alfrec.resetlogs_change# and stopthd = alfrec.thread# and alfrec.sequence# >= stopseq) then stoprcv := FALSE; if bmr then begin sys.dbms_backup_restore.bmrCancel; exception when bmr_block_errors then krmicd.writeMsg(8111); end; stoprcv := TRUE; elsif flash then -- When to_before is implemented we need to stop recovery when -- sequence = stopseq if alfrec.sequence# > stopseq then sys.dbms_backup_restore.flashbackCancel; krmicd.checkSetDatabase; stoprcv := TRUE; end if; else krmicd.execSql('alter database recover cancel'); stoprcv := TRUE; end if; if stoprcv then -- -- If files are restored to the recovery area, then delete them even if -- the user didn't ask; that is the recovery-area behavior, needed -- to effectively manage disk space. -- delete_logs(FALSE, dellog); -- delete all remaining logs select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); -- media recovery complete krmicd.writeMsg(8181, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); return; end if; end if; begin deb('log_apply', 'looking for log with scn ' ||scn||' thread='|| alfrec.thread#||' sequence='||alfrec.sequence# ||' resetlogs scn '|| alfrec.resetlogs_change#||' resetlogs time='|| to_char(stamp2date(rlc))); -- call krmkclog to check and get the log which has the same thread# and SCN alfrec.name := krmicd.checkLog(scn, alfrec.thread#, alfrec.sequence#, alfrec.recid, alfrec.stamp, alfrec.resetlogs_change#, stamp2date(rlc), alfrec.first_change#, alfrec.next_change#, alfrec.block_size); exception when archivelog_backup_not_found then raise; when others then if (is_db_in_noarchivelog) then krmicd.writeMsg(8187, to_char(scn)); else krmicd.writeMsg(8060); -- unable to find log krmicd.writeMsg(8510, to_char(alfrec.thread#), to_char(alfrec.sequence#)); raise; end if; end; deb('log_apply', 'log file name returned is ' || alfrec.name ); begin if alfrec.name is not NULL then if bmr then sys.dbms_backup_restore.bmrDoMediaRecovery(alfrec.name); elsif flash then sys.dbms_backup_restore.flashbackFiles(alfrec.name); else krmicd.writeMsg(8515, alfrec.name, to_char(alfrec.thread#), to_char(alfrec.sequence#)); -- bug 1582073 - substitute each single quote in alname with 2 single quotes krmicd.execSql( 'alter database recover logfile ''' || replace(alfrec.name,'''','''''') || ''''); end if; -- -- If files are restored to the recovery area, then delete them even if -- the user didn't ask; that is the recovery-area behavior, needed -- to effectively manage disk space.
-- delete_logs(FALSE, dellog); -- delete all remaining logs if bmr then begin sys.dbms_backup_restore.bmrCancel; exception when bmr_block_errors then krmicd.writeMsg(8111); end; elsif flash then sys.dbms_backup_restore.flashbackCancel; krmicd.checkSetDatabase; end if; select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); -- media recovery complete krmicd.writeMsg(8181, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); return; else return; end if; exception when mr_cont_rcv then if continue_rcv(createdf) then goto get_log; end if; when mr_need_log then -- call delete_logs to delete archive logs which are older than -- recovery checkpoint scn krmicd.clearErrors; -- -- If files are restored to recovery area, then delete them even if -- user didn't ask which is the behavior for recovery area in order -- to effectively do disk space management. -- delete_logs(TRUE, dellog); goto get_log; when mr_createdf then if (bmr or flash) then raise; end if; -- select a datafile and the corresponding tablespace that it belongs to for df_rec in (select fnfno, fnnam, fnonm, ts.ts#, ts.name from x$kccfn, x$kccfe , v$tablespace ts where fnunn = 1 and fnfno=fenum and fefnh=fnnum and fetsn=ts.ts#) loop fileno := df_rec.fnfno; unnamed := df_rec.fnnam; dfname := df_rec.fnonm; tsnum := df_rec.ts#; tsname := df_rec.name; deb('apply_log', 'tsnum ' || tsnum); deb('apply_log', 'tsname ' || tsname); deb('apply_log', 'fileno ' || fileno); deb('apply_log', 'dfname ' || dfname); deb('apply_log', 'file old name is ' || dfname); recovdf := true; if krmicd.getDfInfo(fileno, tsnum, tsname, newdfname, bnewomf, dropf) then if (newdfname is not null) then dfname := newdfname; deb('apply_log', 'file new name is ' || newdfname); else deb('apply_log', 'using name at creation ' || dfname); end if; krmicd.writeMsg(6064, fileno, dfname); sys.dbms_backup_restore.createDatafile(fno => fileno, newomf => bnewomf, recovery => TRUE, fname => dfname); -- We add the file to list of datafiles to recover. if filelist is not null then filelist := filelist || ', ' || fileno; else filelist := fileno; end if; else dfname := null; deb('apply_log', 'no filename - ignore creation of file# ' || fileno); deb('apply_log', 'This is recover database skip tablespace cmd'); if (df_offln_list.exists(fileno)) then deb('apply_log', 'file is already offlined ' || fileno); else df_offln_list(fileno) := 1; if (dropf = true) then krmicd.writeMsg(6958, 'alter database datafile ' || fileno || ' offline drop'); krmicd.execSql('alter database datafile ' || fileno || ' offline drop'); else krmicd.writeMsg(6958, 'alter database datafile ' || fileno || ' offline'); krmicd.execSql('alter database datafile ' || fileno || ' offline'); end if; end if; end if; end loop; -- clear create datafile redo entry, we are done with error processing -- recovery will be restarted based on recovdf and filelist values. 
krmicd.clearErrors; goto restart_recovery; end; end; >>> # # scr_cre_rep: create or replace a script # define 'scr_cre_rep' <<< -- scr_cre_rep declare creat boolean; line varchar2(1024); name varchar2(100); scr_com varchar2(255); global boolean; begin &args& if creat then dbms_rcvcat.createScript(name, scr_com, global); else dbms_rcvcat.replaceScript(name, scr_com, global); end if; loop line := krmicd.getLine; if line is null then exit; end if; dbms_rcvcat.putLine(line); end loop; dbms_rcvcat.commitChanges; if creat then if global then krmicd.writeMsg(8150, name); else krmicd.writeMsg(8085, name); end if; else if global then krmicd.writeMsg(8151, name); else krmicd.writeMsg(8086, name); end if; end if; end; >>> # # replicate_controlfile: multiplex a controlfile from one source file to all # of the locations specified in the target database's init.ora file. # define replicate_controlfile <<< declare cfname varchar2(512); begin &object& replicate_controlfile(cfname => cfname); end; >>> define 'x$replicate_controlfile' <<< procedure replicate_controlfile(cfname IN varchar2) IS src_name varchar2(512); dest_name varchar2(512); full_name varchar2(512); recid number; stamp number; firstcall boolean := TRUE; begin src_name := sys.dbms_backup_restore.normalizefilename(cfname); for i in 1..9999 loop dest_name := sys.dbms_backup_restore.getparm( sys.dbms_backup_restore.control_file, i); if dest_name is null then exit; end if; if src_name <> dest_name then if firstcall then krmicd.writeMsg(8058); krmicd.writeMsg(8506, src_name); firstcall := FALSE; end if; krmicd.writeMsg(8505, dest_name); sys.dbms_backup_restore.copyControlFile(src_name => src_name, dest_name => dest_name, recid => recid, stamp => stamp, full_name => full_name); end if; end loop; end; >>> define 'x$print_controlfile' <<< procedure print_controlfile is src_name varchar2(512); dest_name varchar2(512); begin for i in 1..9999 loop dest_name := sys.dbms_backup_restore.getparm( sys.dbms_backup_restore.control_file, i); exit when dest_name is null; krmicd.writeMsg(8505, dest_name); end loop; end; >>> # rpctest: test async RPCs # define "rpctest" <<< -- rpctest begin sys.dbms_backup_restore.sleep(60); -- 1 minute end; >>> # # tspitr_rescf: restore controlfile to auxiliary instance # define tspitr_rescf <<< # restore the controlfile restore clone controlfile; # mount the controlfile sql clone 'alter database mount clone database'; # archive current online log sql 'alter system archive log current'; >>> define tspitr_noauto <<< # avoid unnecessary autobackups for structural changes during TSPITR sql 'begin dbms_backup_restore.AutoBackupFlag(FALSE); end;'; >>> # # tspitr_resync: resync catalog # define tspitr_resync <<< # resync catalog resync catalog; >>> # # tspitr_until: set until clause in script # define tspitr_until <<< { # set requested point in time set until &1&; >>> # # tspitr_offrecset: offline the tablespaces of the recovery set # define tspitr_offrecset <<< plsql &begin& -- tspitr_2 declare sqlstatement varchar2(512); offline_not_needed exception; pragma exception_init(offline_not_needed, -01539); begin sqlstatement := 'alter tablespace '|| &1& ||' offline immediate'; krmicd.writeMsg(6162, sqlstatement); krmicd.execSql(sqlstatement); exception when offline_not_needed then null; end; &end& ; >>> # # tspitr_flipcom: comment for files that are switched to datafilecopies # define tspitr_flipcom <<< # switch to valid datafilecopies >>> # # tspitr_flip: switch a datafile that we are FLIPing. 
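#
# (illustrative) with &1& = 5 and &2& = '/u01/aux/df5.cpy' (hypothetical
# values), the skeleton below expands into the generated script as:
#
#   switch clone datafile 5 to datafilecopy '/u01/aux/df5.cpy';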
# define tspitr_flip <<< switch clone datafile &1& to datafilecopy &2&; >>> # # tspitr_newcom : comment for datafile newnames # define tspitr_newcom <<< # set destinations for recovery set and auxiliary set datafiles >>> # # tspitr_newdf: set restore destination for a datafile # define tspitr_newdf <<< set newname for datafile &1& to &2&; >>> # # tspitr_newomfaux: set omf restore destination for a datafile in the auxset. # define tspitr_newomfaux <<< set newname for clone datafile &1& to new; >>> # # tspitr_newomfrec: set omf restore destination for a datafile in the recset. # define tspitr_newomfrec <<< set newname for datafile &1& to new; >>> # # tspitr_newtemp: set restore destination for a tempfile. # define tspitr_newtemp <<< set newname for tempfile &1& to &2&; >>> # # tspitr_newomftemp: set omf restore destinations for a tempfile. # define tspitr_newomftemp <<< set newname for clone tempfile &1& to new; >>> # # tspitr_switchtemp: Switch tempfiles # define tspitr_switchtemp <<< # switch all tempfiles switch clone tempfile all; >>> # # tspitr_restore: restore and switch all to restored datafiles # define tspitr_restore <<< # restore the tablespaces in the recovery set and the auxiliary set restore clone datafile &1&; switch clone datafile all; } >>> # # tspitr_onlinecom: comment for onlining of restored datafiles # define tspitr_onlinecom <<< # online the datafiles restored or switched >>> # # tspitr_onlinedf: online a datafile we restored. # define tspitr_onlinedf <<< sql clone "alter database datafile &1& online"; >>> # # tspitr_recover: perform incomplete recovery and open the db with resetlogs. # define tspitr_recover <<< # recover and open resetlogs recover clone database tablespace &1& delete archivelog; alter clone database open resetlogs; } >>> # # tspitr_onlinetscom: comment for onlining of tablespaces to be exported # define tspitr_onlinetscom <<< # online the tablespaces that will be exported >>> # # tspitr_onts: online a tablespace that will be exported. # define tspitr_onts <<< sql clone 'alter tablespace &1& online'; >>> # # tspitr_readonlycom: comment for making read only the tablespaces to be # exported # define tspitr_readonlycom <<< # make read only the tablespace that will be exported >>> # # tspitr_rots: set read only the tablespaces that will be exported. # define tspitr_rots <<< sql clone 'alter tablespace &1& read only'; >>> # # tspitr_mkclonedir: create directory for datapump export. # define tspitr_mkclonedir <<< # create directory for datapump export sql clone "create or replace directory &1& as ''&2&''"; >>> # # tspitr_mktargetdir: create directory for datapump import.
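#
# (illustrative) with &1& = tspitr_dir and &2& = /tmp/tspitr (hypothetical
# values), the skeleton below expands into the generated script as:
#
#   sql "create or replace directory tspitr_dir as ''/tmp/tspitr''";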
# define tspitr_mktargetdir <<< # create directory for datapump import sql "create or replace directory &1& as ''&2&''"; >>> # # tspitr_uncatcom: comment for uncatalog used datafilecopies # define tspitr_uncatcom <<< # uncatalog used datafilecopies >>> # # tspitr_uncatcopy: uncatalog a used datafilecopy # define tspitr_uncatcopy <<< change datafilecopy &1& uncatalog; >>> # # tspitr_dropcom: comment for dropping tablespaces # define tspitr_dropcom <<< # drop target tablespaces before importing them back >>> # # tspitr_dropkeepts: drop a tablespace before importing # define tspitr_dropkeepts <<< sql 'drop tablespace &1& including contents keep datafiles'; >>> # # tspitr_dropts: drop a tablespace before importing # define tspitr_dropts <<< sql 'drop tablespace &1& including contents'; >>> # # tspitr_shutclone: shutdown clone # define tspitr_shutclone <<< # shutdown clone before import shutdown clone immediate >>> # # tspitr_readwritecom: comment for read write and offline of imported # tablespaces # (for getting the correct resetlogs SCN) # define tspitr_readwritecom <<< # make read write and offline the imported tablespaces >>> # # tspitr_rwts: make read write and offline an imported tablespace # define tspitr_rwts <<< sql 'alter tablespace &1& read write'; sql 'alter tablespace &2& offline'; >>> # # tspitr_yesauto: enable autobackups # define tspitr_yesauto <<< # enable autobackups after TSPITR is finished sql 'begin dbms_backup_restore.AutoBackupFlag(TRUE); end;'; >>> # # impscrpt_0: sample command for importing tablespaces # define impscrpt_0 <<< /* The following command may be used to import the tablespaces. Substitute values for <logon> and <directory_object>. impdp <logon> directory=<directory_object> dumpfile=&1& transport_datafiles=&2& */ -------------------------------------------------------------- -- Start of sample PL/SQL script for importing the tablespaces -------------------------------------------------------------- -- creating directory objects >>> # # impscrpt_1: create the directory objects that will be used # define impscrpt_1 <<< CREATE DIRECTORY &1& AS &2&; >>> # # impscrpt_2: start plsql block for importing tablespaces # define impscrpt_2 <<< /* PL/SQL Script to import the exported tablespaces */ DECLARE -- the datafiles tbs_files dbms_streams_tablespace_adm.file_set; cvt_files dbms_streams_tablespace_adm.file_set; -- the dumpfile to import dump_file dbms_streams_tablespace_adm.file; dp_job_name VARCHAR2(30) := NULL; -- names of tablespaces that were imported ts_names dbms_streams_tablespace_adm.tablespace_set; BEGIN -- dump file name and location dump_file.file_name := &1&; dump_file.directory_object := '&2&'; -- forming list of datafiles for import >>> # # impscrpt_3: add the datafiles # define impscrpt_3 <<< tbs_files(&1&).file_name := &2&; tbs_files(&3&).directory_object := &4&; >>> # # impscrpt_4: form attach_tablespace call and close plsql block # define impscrpt_4 <<< -- import tablespaces dbms_streams_tablespace_adm.attach_tablespaces( datapump_job_name => dp_job_name, dump_file => dump_file, tablespace_files => tbs_files, converted_files => cvt_files, tablespace_names => ts_names); -- output names of imported tablespaces IF ts_names IS NOT NULL AND ts_names.first IS NOT NULL THEN FOR i IN ts_names.first ..
ts_names.last LOOP dbms_output.put_line('imported tablespace '|| ts_names(i)); END LOOP; END IF; END; / -- dropping directory objects >>> # # impscrpt_5: drop the directory objects that were created # define impscrpt_5 <<< DROP DIRECTORY &1&; >>> # # impscrpt_6: end of sample PL/SQL script # define impscrpt_6 <<< -------------------------------------------------------------- -- End of sample PL/SQL script -------------------------------------------------------------- >>> # # validateset: validate backupset # define validateset <<< -- validateset sys.dbms_backup_restore.restorevalidate; >>> # # scandfc: scan datafilecopy # define scandfc <<< -- scandfc declare recid number; stamp number; fname varchar2(512); dfnumber number; scn number; check_logical boolean := false; found boolean; begin &object& found := krmicd.valGetFound(recid, stamp); if dfnumber = 0 then krmicd.writeMsg(8518, krmicd.getChid, fname); else krmicd.writeMsg(8519, krmicd.getChid, fname); end if; scn := sys.dbms_backup_restore.scandatafilecopy( recid => recid, stamp => stamp, update_fuzziness =>false, check_logical => check_logical); if dfnumber = 0 then krmicd.fileRestored(ftype => rman_constant.CONTROLFILE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); else krmicd.fileRestored(ftype => rman_constant.DATAFILE, fno => dfnumber, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0, fname => fname); end if; exception when sys.dbms_backup_restore.retryable_error_exp then if not krmicd.doRestoreFailover(rman_constant.DATAFILE) then raise; end if; -- display the error message as a warning and do restore failover krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; when others then raise; end; >>> # # bscandfc: bmr scan datafilecopy # define bscandfc <<< -- bscandfc declare recid number; stamp number; found boolean; fname varchar2(512); begin &object& found := krmicd.valGetFound(recid, stamp); krmicd.writeMsg(8532, krmicd.getChid, fname); sys.dbms_backup_restore.bmrscandatafilecopy(recid, stamp); exception when sys.dbms_backup_restore.retryable_error_exp then if not krmicd.doRestoreFailover(rman_constant.DATAFILE) then raise; end if; -- display the error message as a warning and do restore failover krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; when others then raise; end; >>> # # scanal: scan archived log # define scanal <<< -- scanal declare record_not_found exception; pragma exception_init(record_not_found, -19571); recid number; stamp number; fname varchar2(512); full_name varchar2(512); rsid number; rsts number; isstby boolean := FALSE; begin &object& krmicd.writeMsg(8520, krmicd.getChid, fname); sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); <<scan_log>> -- retry on non-cataloged archived log begin sys.dbms_backup_restore.scanarchivedlog(recid => recid, stamp => stamp); exception when record_not_found then sys.dbms_backup_restore.inspectArchivedLog(fname => fname, full_name => full_name, recid => recid, stamp => stamp); deb('scanal', 'cataloged archive log recid=' || recid || ' stamp=' || stamp); krmicd.clearErrors; goto scan_log; end; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # setcmdid: set command id # define setcmdid <<< -- setcmdid declare cmdid varchar2(512); begin &object& if krmicd.getDevType is null then return; end if; sys.dbms_backup_restore.set_client_info( 'id='||cmdid||',rman channel='||krmicd.getchid); end; >>> #
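# Illustration: setcmdid backs SET COMMAND ID TO '<string>'; once it has
# run, a channel's server session can be located with a query such as
#   select sid, client_info from v$session
#   where client_info like 'id=<string>%';
#
# aor: apply offline range # define aor <<< -- aor declare cfname varchar2(512) := null; recid number; stamp number;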
copy_recid number := 0; copy_stamp number := 0; fname varchar2(512) := null; dfnumber binary_integer := null; blksize number := null; begin &1& -- set variables sys.dbms_backup_restore.applyOfflineRange(cfname => cfname, dfname => fname, blksize => blksize, recid => recid, stamp => stamp, fno => dfnumber, dfrecid => copy_recid, dfstamp => copy_stamp); if (fname is not null) then krmicd.writeMsg(8088, fname); else krmicd.writeMsg(8088, to_char(dfnumber, 'FM09999')); end if; krmicd.writeMsg(8521, to_char(recid), to_char(stamp)); end; >>> # # proxy_backup_start: proxy backup start # define proxy_backup_start <<< -- proxy_backup_start declare dfnumber number; copy_recid number; copy_stamp number; busy_retries number; resetlogs_change number; resetlogs_time varchar2(512); creation_change number; checkpoint_change number; blksize number; fname varchar2(1024); fmt varchar2(1024); no_delete binary_integer; first_time boolean := TRUE; cfisstby boolean := FALSE; incremental boolean := FALSE; media_pool binary_integer := 0; set_type varchar2(30); tag varchar2(32) := NULL; elapsed number; start_time date; hours number; mins number; secs number; pieceno number := 0; set_stamp number; set_count number; cfname varchar2(1); -- not used for backup currcf boolean := FALSE; -- not used for backup validate boolean := FALSE; -- not used for backup val_bs_only boolean := FALSE; -- TRUE => only bs validation -- means, no failover necessary keep_options binary_integer := 0; keep_until number := 0; lyear varchar2(4); lmonth varchar2(2); lday varchar2(2); chtype varchar2(16); isstby boolean; handle varchar2(512); arch_recid number; arch_stamp number; thread number; sequence number; first_change number; next_change number; deffmt binary_integer; dest binary_integer := 0; isrestore boolean := FALSE; -- not a proxy restore rsid number; rsts number; reqscn number; -- streams/standby required scn rlgscn number; -- streams/standby resetlogs scn appscn number; apprlgscn number; reqbackups number; nbackups number; alldest number := 0; docopies boolean := FALSE; in_use exception; del_for_space exception; no_files exception; pragma exception_init(in_use, -19584); pragma exception_init(del_for_space, -19805); pragma exception_init(no_files, -19581); begin &1& if incremental then set_type := 'incremental level 0'; else set_type := 'full'; end if; select to_char(sysdate, 'YYYY', 'NLS_CALENDAR=Gregorian'), to_char(sysdate, 'MM', 'NLS_CALENDAR=Gregorian'), to_char(sysdate, 'DD', 'NLS_CALENDAR=Gregorian') into lyear, lmonth, lday from x$dual; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); sys.dbms_backup_restore.proxyBeginBackup(tag => tag, incremental => incremental, media_pool => media_pool, set_stamp => set_stamp, set_count => set_count, keep_options => keep_options, keep_until => keep_until); start_time := stamp2date(set_stamp); >>> # # proxy_budf: proxy backup datafile name. This skeleton appears 1 or more # times in the step. It adds 1 datafile to the list of files to be # proxy backed up. 
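# As an illustration (assembly implied by the comments in this section, not
# itself a skeleton): a proxy backup step is stitched together as
# proxy_backup_start, then one proxy_budf/proxy_budc/proxy_bucf/proxy_bual
# per file to be backed up, then proxy_go exactly once, then proxy_end.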
# define proxy_budf <<< -- proxy_budf fmt := NULL; &1& -- fmt chtype := krmicd.getDevType; krmicd.getFormat(format => fmt, copy => 1, deffmt => deffmt, dest => dest); if chtype is null then chtype := 'N/A'; end if; loop exit when not krmicd.proxyBackupDataFile(dfnumber, fname); pieceno := pieceno + 1; if (first_time) then krmicd.writeMsg(8527, krmicd.getChid, set_type, to_char(stamp2date(set_stamp))); krmicd.writeMsg(8091, krmicd.getChid); first_time := FALSE; end if; handle := sys.dbms_backup_restore.genPieceName( pno => pieceno, set_count => set_count, set_stamp => set_stamp, format => fmt, copyno => 1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => null, -- computed in server ndbname => null, -- computed in server pdbname => null, -- computed in server cfseq => NULL); -- not used sys.dbms_backup_restore.proxyBackupDataFile(file# => dfnumber, handle => handle); krmicd.writeMsg(8522, to_char(dfnumber, 'FM09999'), fname); krmicd.writeMsg(8529, handle); end loop; >>> # # proxy_budc: proxy backup datafilecopy name # define proxy_budc <<< -- proxy_budc fmt := NULL; &1& -- fmt chtype := krmicd.getDevType; krmicd.getFormat(format => fmt, copy => 1, deffmt => deffmt, dest => dest); if chtype is null then chtype := 'N/A'; end if; loop exit when not krmicd.proxyBackupDataFileCopy(dfnumber, fname, copy_recid, copy_stamp); pieceno := pieceno + 1; if (first_time) then krmicd.writeMsg(8527, krmicd.getChid, set_type, to_char(stamp2date(set_stamp))); krmicd.writeMsg(8091, krmicd.getChid); first_time := FALSE; end if; handle := sys.dbms_backup_restore.genPieceName( pno => pieceno, set_count => set_count, set_stamp => set_stamp, format => fmt, copyno => 1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => null, -- computed in server ndbname => null, -- computed in server pdbname => null, -- computed in server cfseq => NULL); -- not used begin sys.dbms_backup_restore.proxyBackupDataFileCopy( copy_recid => copy_recid, copy_stamp => copy_stamp, handle => handle); krmicd.writeMsg(8033, krmicd.getChid, to_char(dfnumber, 'FM09999')); krmicd.writeMsg(8506, fname); krmicd.writeMsg(8529, handle); exception when in_use then krmicd.writeMsg(8603, fname); krmicd.clearErrors; when del_for_space then krmicd.writeMsg(8604, fname); krmicd.clearErrors; end; end loop; >>> # # proxy_bucf: proxy backup controlfile name. Appears at most 1 time in # a step. It adds a specified backup controlfile name to the proxy # conversation, or the snapshot controlfile if cfname is null. # define proxy_bucf <<< -- proxy_bucf fmt := NULL; &1& -- fmt chtype := krmicd.getDevType; krmicd.getFormat(format => fmt, copy => 1, deffmt => deffmt, dest => dest); if chtype is null then chtype := 'N/A'; end if; loop fname := NULL; exit when not krmicd.proxyBackupControlfile(fname); pieceno := pieceno + 1; if (first_time) then krmicd.writeMsg(8527, krmicd.getChid, set_type, to_char(stamp2date(set_stamp))); krmicd.writeMsg(8091, krmicd.getChid); first_time := FALSE; end if; -- Refresh the snapshot controlfile so that it is reasonably current -- before backing it up. This is necessary because it is possible -- that the snapshot controlfile SCN is zero, indicating that its -- contents are not valid. 
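-- Illustration (hypothetical path): the snapshot controlfile refreshed
-- below lives at the location set by, e.g.,
--   CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/u01/app/oracle/snapcf_db1.f';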
busy_retries := 0; <<snapshot>> -- retry on failure to get snapshot enqueue begin if fname is null then -- backup current controlfile sys.dbms_backup_restore.cfileMakeAndUseSnapshot(isstby); sys.dbms_backup_restore.cfileUseCurrent; end if; handle := sys.dbms_backup_restore.genPieceName( pno => pieceno, set_count => set_count, set_stamp => set_stamp, format => fmt, copyno => 1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => null, -- computed in server ndbname => null, -- computed in server pdbname => null, -- computed in server cfseq => NULL); -- not used sys.dbms_backup_restore.proxyBackupControlFile(name => fname, handle => handle); exception when sys.dbms_backup_restore.snapshot_enqueue_busy then -- retry up to 180 times, waiting 20 seconds between retries, -- thus attempting to get a snapshot control file for an hour. -- Since we are displaying the RMAN-08512 message, the user will know -- that RMAN is waiting to get the snapshot enqueue. if busy_retries = 180 then krmicd.writeMsg(20029, 'cannot make a snapshot controlfile'); raise; end if; busy_retries := busy_retries + 1; -- print this message every 5 minutes if (mod(busy_retries, 15) = 0) then krmicd.writeMsg(8512); end if; krmicd.sleep(20); krmicd.clearErrors; goto snapshot; end; -- snapshot controlfile stuff if fname is null then if isstby then krmicd.writeMsg(8099); else krmicd.writeMsg(8093); end if; else krmicd.writeMsg(8524, fname); end if; krmicd.writeMsg(8529, handle); end loop; >>> # # proxy_bual: proxy archivelog name. This skeleton appears 1 or more # times in the step. It adds 1 archivelog to the list of files to be # proxy backed up. # define proxy_bual <<< -- proxy_bual fmt := NULL; &1& -- fmt chtype := krmicd.getDevType; krmicd.getFormat(format => fmt, copy => 1, deffmt => deffmt, dest => dest); if chtype is null then chtype := 'N/A'; end if; loop fname := NULL; exit when not krmicd.proxyBackupArchivedLog(thread, sequence, fname, arch_recid, arch_stamp); pieceno := pieceno + 1; if (first_time) then krmicd.writeMsg(8542, krmicd.getChid, to_char(stamp2date(set_stamp))); krmicd.writeMsg(8543, krmicd.getChid); first_time := FALSE; end if; handle := sys.dbms_backup_restore.genPieceName( pno => pieceno, set_count => set_count, set_stamp => set_stamp, format => fmt, copyno => 1, devtype => chtype, year => lyear, month => lmonth, day => lday, dbid => null, -- computed in server ndbname => null, -- computed in server pdbname => null, -- computed in server cfseq => NULL); -- not used begin sys.dbms_backup_restore.proxyBackupArchivedlog( arch_recid => arch_recid, arch_stamp => arch_stamp, handle => handle); krmicd.writeMsg(8504, to_char(thread), to_char(sequence), to_char(arch_recid), to_char(arch_stamp)); krmicd.writeMsg(8529, handle); exception when in_use then krmicd.writeMsg(8603, fname); krmicd.clearErrors; when del_for_space then krmicd.writeMsg(8604, fname); krmicd.clearErrors; end; end loop; >>> # # proxy_restore_start: restore datafiles/controlfiles. This is the first # skeleton in the step.
# define proxy_restore_start <<< -- proxy_restore_start declare currcf boolean; handle varchar2(512); dfnumber number; toname varchar2(512); cfname varchar2(512); destination varchar2(512) := null; first_time boolean := TRUE; start_time date; elapsed number; hours number; mins number; secs number; recid number; stamp number; validate boolean; val_bs_only boolean := FALSE; -- TRUE => only bs validation -- means, no failover necessary vrc binary_integer; validation_failure exception; thread number; sequence number; resetlogs_id number; -- ub4 value of resetlogs timestamp, used to construct -- unique names during proxy archivelog restores. isrestore boolean := TRUE; -- is a proxy restore rsid number; rsts number; tsname varchar2(512); blksize number; blocks number; no_files exception; pragma exception_init(no_files, -19581); begin &1& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); if not validate then sys.dbms_backup_restore.proxyBeginRestore( destination => destination ,cleanup => FALSE); end if; select sysdate into start_time from x$dual; if (validate) then krmicd.writeMsg(8100, krmicd.getChid); else krmicd.writeMsg(8090, krmicd.getChid); end if; >>> # # proxy_rsdf - name a datafile to be proxy restored # define proxy_rsdf <<< -- proxy_rsdf loop exit when not krmicd.proxyRestoreDatafile(handle, dfnumber, toname, blocks, blksize, tsname); sys.dbms_backup_restore.proxyRestoreDatafile(handle => handle, file# => dfnumber, toname => toname, tsname => tsname, blksize => blksize, blocks => blocks); if first_time then krmicd.writeMsg(8094, krmicd.getChid); first_time := FALSE; end if; krmicd.writeMsg(8610, krmicd.getChid, to_char(dfnumber, 'FM09999'), toname); krmicd.writeMsg(8529, handle); end loop; >>> # # proxy_rscf - restore the controlfile in a datafile proxy restore conversation # define proxy_rscf <<< -- proxy_rscf loop exit when not krmicd.proxyRestoreControlfile(handle, cfname, currcf, blocks, blksize); sys.dbms_backup_restore.proxyRestoreControlfile(handle => handle, toname => cfname, blksize => blksize, blocks => blocks); if first_time then krmicd.writeMsg(8094, krmicd.getChid); first_time := FALSE; end if; krmicd.writeMsg(8021, krmicd.getChid); if (cfname is not null) then krmicd.writeMsg(8505, cfname); end if; krmicd.writeMsg(8529, handle); end loop; >>> # # proxy_rsal - name an archivelog to be proxy restored # define proxy_rsal <<< -- proxy_rsal loop exit when not krmicd.proxyRestoreArchivedLog(handle, thread, sequence, resetlogs_id, blocks, blksize); sys.dbms_backup_restore.proxyRestoreArchivedLog( handle => handle, thread => thread, sequence => sequence, resetlogs_id =>resetlogs_id, blksize => blksize, blocks => blocks); if first_time then krmicd.writeMsg(8544, krmicd.getChid); first_time := FALSE; end if; krmicd.writeMsg(8529, handle); krmicd.writeMsg(8022, krmicd.getChid); krmicd.writeMsg(8510, to_char(thread), to_char(sequence)); end loop; >>> # # proxy_val - validate a proxy backup # define proxy_val <<< -- proxy_val loop exit when not krmicd.proxyValOnly(handle, recid, stamp); vrc := sys.dbms_backup_restore.proxyValOnly(recid, stamp, handle); if vrc < sys.dbms_backup_restore.validate_file_different then krmicd.writeMsg(8531, krmicd.getChid, handle, 'FOUND'); krmicd.fileRestored(ftype => rman_constant.PROXYFILE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); else krmicd.writeMsg(8531, krmicd.getChid, handle, 'NOT FOUND'); end if; end loop; >>> #
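# Illustration (order implied by the surrounding comments, not itself a
# skeleton): a proxy restore step runs proxy_restore_start first, then one
# proxy_rsdf / proxy_rscf / proxy_rsal / proxy_val per file, then proxy_go,
# then proxy_end.
#
# proxy_go: do the proxy backup. Appears exactly once.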
# define proxy_go <<< -- proxy_go if not validate then begin sys.dbms_backup_restore.proxyGo; if isrestore then getFileRestored(TRUE); end if; sys.dbms_backup_restore.proxyCancel; -- this could also signal error exception when sys.dbms_backup_restore.retryable_error_exp then if (isrestore and krmicd.doRestoreFailover(rman_constant.PROXYFILE)) then -- display the error message as a warning and do restore failover krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; else raise; end if; when no_files then if (not isrestore) then krmicd.writeMsg(8057, krmicd.getChid); krmicd.clearErrors; else raise; end if; when others then raise; end; -- print all restored controlfile names if currcf then print_controlfile; end if; select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); krmicd.writemsg(8528, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end if; >>> # # proxy_end: end the proxy conversation # define proxy_end <<< if validate then krmicd.writeMsg(8101, krmicd.getChid); end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # # Duplicate database skeletons # # # General use skeletons # Used in any of the 4 phases of the duplicate database # # Begin of script define cl_beg <<< { >>> # Set until clause for restore and for recover # &1& indicates the scn to which to restore or recover define cl_unt1 <<< set until scn &1&; >>> # &1& indicates the time to which to restore or recover define cl_unt2 <<< set until time &1&; >>> # &1& indicates the restore point to which to restore the controlfile # this is necessary because otherwise we will not find the correct # controlfile that corresponds to the restore point define cl_unt3 <<< set to restore point &1&; >>> # Catalog all datafilecopies, the skeleton is in 3 parts: # cl_catb for the first datafilecopy # cl_cat for the second to last datafilecopy # cl_cate to finish the command # &1& is the filename of the datafilecopy to catalog define cl_catb <<< catalog clone datafilecopy &1& >>> define cl_cat <<< , &1& >>> define cl_cate <<< ; >>> # End of command define cl_cend <<< ; >>> # End of script define cl_end <<< } >>> # Restore phase skeletons # Used only by the restore phase of the clone database # # Restore of clone define res_cl_res <<< restore >>> # Restore of clone spfile define res_cl_spf <<< restore clone spfile to &1&; >>> # Restore of clone spfile from backup define res_cl_spffb <<< restore clone spfile to &1& from &2&; >>> # Copy file to clone define cop_cl_file <<< targetfile &1& auxiliary format &2& >>> # Set spfile define cop_set_spf <<< sql clone "alter system set spfile=&1&"; >>> define set_cl_spfp <<< sql clone "alter system set &1& = &2& comment=&3& scope=spfile"; >>> define set_cl_spfr <<< sql clone "alter system reset &1& scope=spfile"; >>> define cspfile_cl <<< sql clone "create spfile from memory"; >>> define cpfile_cl <<< sql clone "create pfile=&1& from spfile"; >>> # Skip clause for restore, used for offline and readonly # &1& indicates the comma separated list of tablespaces to skip define cl_skp <<< skip forever tablespace &1& >>> define cl_skp_ro <<< skip readonly >>> # clone database keywords define cl_db <<< clone database >>> # clone datafile keywords define cl_df <<< clone datafile &1& >>> #standby keyword define cl_stby <<< standby >>> # Create an image copy for duplicate in the restore phase. define cop_cl_bck <<< backup as copy reuse >>> # Create an image copy for duplicate in the restore phase. 
# &1& is the number of the datafile # &2& is the new name for the datafile define cop_cl_df <<< datafile &1& auxiliary format &2& >>> # Create an image copy for duplicate in the restore phase. # &1& is the number of the datafile # The destination format is provided by set newname. define copn_cl_df <<< datafile &1& auxiliary format new >>> # Copy the archive log define cop_cl_al <<< archivelog like &1& auxiliary format &2& >>> # Add standby logfile group define cl_add_srlg <<< sql clone "alter database add standby logfile group &1& &2& size &3& &4&"; >>> # Add standby logfile member define cl_add_srl <<< sql clone "alter database add standby logfile member &1& reuse to group &2&"; >>> # Drop standby logfile group define cl_drop_srlg <<< sql clone "alter database drop standby logfile group &1&"; >>> define cat_cl_al1 <<< catalog clone archivelog &1&; >>> define cat_cl_al2 <<< catalog clone recovery area; >>> define cat_cl_al3 <<< catalog clone start with &1&; >>> # Archive current log define arc_cl_log <<< sql 'alter system archive log current'; >>> # Set newname for datafile # &1& is the number of the datafile # &2& is the new name for the datafile define res_cl_set <<< set newname for datafile &1& to &2&; >>> # Set newname to NEW for datafile # &1& is the number of the datafile define res_cl_setn <<< set newname for clone datafile &1& to new; >>> # Switch all datafiles define res_cl_swi <<< switch clone datafile all; >>> # Set newname for tempfile # &1& is the number of the tempfile # &2& is the new name for the tempfile define res_cl_sett <<< set newname for tempfile &1& to &2&; >>> # Set newname to NEW for tempfile # &1& is the number of the tempfile define res_cl_setnt <<< set newname for clone tempfile &1& to new; >>> # Switch all tempfiles define res_cl_swit <<< switch clone tempfile all; >>> # Recover phase skeletons # Used only by the recover phase of the clone database # # Recover database define rec_cl_rec <<< recover >>> # Comma between arguments define rec_cl_comma <<< , >>> # With noredo define rec_cl_noredo <<< noredo >>> # With delete input define rec_cl_delinp <<< delete archivelog >>> # Open database phase skeletons # Used only by the open database phase of the clone database # # # Shutdown the auxiliary database define opn_cl_shi <<< shutdown clone immediate; >>> # Startup nomount the auxiliary database with a pfile define opn_cl_stp <<< startup clone nomount pfile=&1&; >>> # Startup nomount the auxiliary database with a spfile define opn_cl_stsp <<< startup clone nomount; >>> # Manually switch each datafile # &1& is the number of the datafile # &2& is the name of the datafile to which we are switching define opn_cl_swi <<< switch clone datafile &1& to datafilecopy &2&; >>> # # Open database # define opn_cl_end <<< Alter clone database open resetlogs; >>> # # Enable restricted session # define en_rest_sess <<< sql clone 'alter system enable restricted session'; >>> # # online readonly tablespaces # define cl_onl_ts <<< # online the readonly tablespace sql clone "alter tablespace &1& online"; >>> # Backup of control file (for nobackup duplicate) define cop_cl_cf <<< shutdown clone immediate; startup clone force nomount backup as copy current controlfile auxiliary format &1&; >>> # Backup of standby control file (for nobackup duplicate) define cop_cl_scf <<< backup as copy current controlfile for standby auxiliary format &1&; >>> # Turn off flashback database define cl_flash_off <<< sql clone 'alter database flashback off'; >>>
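# Illustration only (hypothetical file name and SCN): assembled from the
# skeletons above, a minimal duplicate restore-phase script could read
#   {
#   set until scn 1234567;
#   set newname for datafile 1 to '/u01/aux/system01.dbf';
#   restore clone database;
#   switch clone datafile all;
#   }
# built from cl_beg, cl_unt1, res_cl_set, res_cl_res + cl_db + cl_cend,
# res_cl_swi and cl_end.
# Make copies of copied control file using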
restore on clone define cop_cl_cf2 <<< restore clone controlfile to &1& from &2&; >>> # Mount the database define mnt_cl_cf <<< alter clone database mount; >>> # Mount the standby database define mnt_cl_scf <<< sql clone 'alter database mount standby database'; >>> # Restore of standby define res_cl_scf <<< restore clone standby controlfile; >>> # Restore of standby controlfile from backup define res_cl_scffb <<< restore clone standby controlfile from &1&; >>> # Restore of clone controlfile define res_cl_clcf <<< restore clone controlfile; sql clone 'alter database mount clone database'; >>> # Restore of controlfile define res_cl_cf <<< shutdown clone immediate; startup clone force nomount restore clone primary controlfile; alter clone database mount; >>> # Restore of controlfile from backup define res_cl_cffb <<< shutdown clone immediate; startup clone force nomount restore clone primary controlfile from &1&; alter clone database mount; >>> # Online datafiles define cl_onl_df <<< sql clone "alter database datafile &1& online"; >>> # # change datafilecopy uncatalog # define cl_uncat <<< change datafilecopy &1& uncatalog; >>> # backup piece validation for AUTOLOCATE mode define val_pieces <<< --val_pieces declare pieceCnt number := 0; retval binary_integer; handle varchar2(512); recid number; stamp number; setCount number; setStamp number; piece number; valrc binary_integer; hdl_isdisk binary_integer := 0; m1 varchar2(128); m2 varchar2(128); m3 varchar2(128); m4 varchar2(128); m5 varchar2(128); m6 varchar2(128); m7 varchar2(128); m8 varchar2(128); m9 varchar2(128); m10 varchar2(128); m11 varchar2(128); m12 varchar2(128); m13 varchar2(128); m14 varchar2(128); m15 varchar2(128); m16 varchar2(128); m17 varchar2(128); m18 varchar2(128); m19 varchar2(128); m20 varchar2(128); returnCode binary_integer; msca binary_integer; attributes binary_integer; preview boolean := FALSE; recall boolean := FALSE; flags binary_integer := 0; disp_hdr boolean := TRUE; few_remote boolean := FALSE; few_remote_e exception; pragma exception_init(few_remote_e, -20507); function addcomma(media in varchar2) return varchar2 is out_media varchar2(80) := null; begin if media is not null then out_media := ',' ||media; end if; return out_media; end; begin &1& -- init the list sys.dbms_backup_restore.validationStart; -- collect the pieces loop retval := krmicd.getValidatePieceArgs(handle, recid, stamp, setCount, setStamp, piece, hdl_isdisk); deb('val_pieces', 'handle:'||handle||' is disk:'||hdl_isdisk); if (retval = 0) then -- we got a piece -- add it to our list sys.dbms_backup_restore.validationAddPiece( recid => recid, stamp => stamp, handle => handle, set_stamp => setStamp, set_count => setCount, pieceno => piece, params => NULL, hdl_isdisk => hdl_isdisk); -- for debugging pieceCnt := pieceCnt + 1; else -- no more pieces exit; end if; end loop; if recall then flags := sys.dbms_backup_restore.vvflags_recall; end if; -- do the validate sys.dbms_backup_restore.validationValidate(flags => flags); -- get the results -- put them into our data structures loop sys.dbms_backup_restore.validationNextResult(handle => handle, recid => recid, set_stamp => setStamp, set_count => setCount, pieceno => piece, msca => msca, m1 => m1, m2 => m2, m3 => m3, m4 => m4, m5 => m5, m6 => m6, m7 => m7, m8 => m8, m9 => m9, m10 => m10, m11 => m11, m12 => m12, m13 => m13, m14 => m14, m15 => m15, m16 => m16, m17 => m17, m18 => m18, m19 => m19, m20 => m20, attributes => attributes); exit when handle is NULL; deb('val_pieces', 'validated handle:' || 
handle||' Media:'||m1|| ' attributes='||attributes||' msca='||msca); if bitand(attributes, sys.dbms_backup_restore.attribute_remote) = 1 then if disp_hdr then if recall then krmicd.writeMsg(8608); krmicd.writeMsg(7524); else krmicd.writeMsg(8607); krmicd.writeMsg(6320); end if; disp_hdr := FALSE; few_remote := TRUE; end if; krmicd.writeMsg(6355, handle, m1 || addcomma(m2) || addcomma(m3) || addcomma(m4) || addcomma(m5) || addcomma(m6) || addcomma(m7) || addcomma(m8) || addcomma(m9) || addcomma(m10) || addcomma(m11) || addcomma(m12) || addcomma(m13) || addcomma(m14) || addcomma(m15) || addcomma(m16) || addcomma(m17) || addcomma(m18) || addcomma(m19) || addcomma(m20)); end if; pieceCnt := pieceCnt - 1; krmicd.setValidateResult(handle, recid, setStamp, setCount, piece, msca, m1 , m2 , m3 , m4 , m5 , m6 , m7 , m8 , m9 , m10, m11, m12, m13, m14, m15, m16, m17, m18, m19, m20); end loop; krmicd.endPieceValidate; sys.dbms_backup_restore.validationEnd; if few_remote and not preview then raise few_remote_e; end if; end; >>> define backup_baut <<< declare ncopies number; copyno number; handle varchar2(512); comment varchar2(80); media varchar2(80); lcfaudate date; lsequence binary_integer; lbautfmt varchar2(512); rsid number; rsts number; p1 binary_integer := 0; p2 binary_integer; p3 binary_integer; p4 binary_integer; p5 binary_integer; t1 varchar2(1025); t2 varchar2(1); t3 varchar2(1); busy_retries number := 0; begin -- lcfaudate must have day, year and month info to be extracted in -- the server. As it is set only for internal testing, I am not using a -- KRMS_ISO_DATE_FORMAT format. &lsequence& &lcfaudate& &lbautfmt& &object& setBackupParams(FALSE); if (krmicd.getParams(1, p2, p3, p4, p5, t1, t2, t3)) then p1 := 1; end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); <<snapshot>> -- retry on failure to get snapshot enqueue begin sys.dbms_backup_restore.DoAutobackup(ncopies => ncopies, cfaudate => lcfaudate, seq => lsequence, format => lbautfmt, p1 => p1, p2 => p2, p3 => p3, p4 => t1); exception when sys.dbms_backup_restore.snapshot_enqueue_busy then -- retry up to 180 times, waiting 20 seconds between retries, -- thus attempting to get a snapshot control file for an hour. -- Since we are displaying the RMAN-08512 message, the user will know -- that RMAN is waiting to get the snapshot enqueue.
if busy_retries = 180 then krmicd.writeMsg(20029, 'cannot make a snapshot controlfile'); raise; end if; busy_retries := busy_retries + 1; -- print this message every 5 minutes if (mod(busy_retries, 15) = 0) then krmicd.writeMsg(8512); end if; krmicd.sleep(20); krmicd.clearErrors; goto snapshot; end; -- snapshot controlfile stuff copyno := 0; loop exit when copyno=ncopies; sys.dbms_backup_restore.backupPieceCrtDupGet(copyno, handle, comment, media); if comment is null then comment := 'NONE'; end if; krmicd.writeMsg(8503, handle, comment); copyno := copyno + 1; end loop; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # skeleton to search for autobackup define raut_search <<< -- raut_search declare ldbid number := 0; trgt_date date; lyear varchar2(4); lmonth varchar2(2); lday varchar2(2); lmon_day varchar2(4); year binary_integer; month binary_integer; day binary_integer; ats number; -- autobackup time stamp days number; sft_seq binary_integer; -- search file table seq and date sft_date number; aut_seq binary_integer; -- used autobackup seq and date aut_date number; tempseq binary_integer; -- temp var tempdate number; old_seq binary_integer := 1; cfname varchar2(512); sfname varchar2(512); pfname varchar2(512); rc number; status number; lformat varchar2(512) := NULL; lsequence binary_integer := 255; lmaxdays number := 7; handle varchar2(512) := NULL; outhandle varchar2(512); sfthandle varchar2(512); lhandle varchar2(512); sdate varchar2(512) := NULL; currcf boolean := FALSE; set_ra varchar2(512) := NULL; -- recovery area specified by user set_ns varchar2(512) := NULL; -- name space specified by user uset_ns varchar2(512) := NULL; -- upper case of db_name given by user done boolean; found boolean; mustspfile boolean; -- must we find autobackup with spfile? abort boolean; dorestore boolean; endstatus boolean; fromcopy boolean := FALSE; begtime date; endtime date; seconds number; retval number; full_name varchar2(512); recid number; stamp number; isstby boolean := FALSE; -- restore standby controlfile? csf_retries number := 0; -- retry counter for spfile_not_in_bs -- or scf_not_in_bs rsid number; rsts number; start_time date; -- begin_time of restore elapsed number; -- elapsed time for restore hours number; mins number; secs number; dummyscn number; isomf boolean; isasm boolean; istmplt boolean; validate boolean := FALSE; preview boolean := FALSE; vheader boolean := FALSE; -- NOTE: ORA-6512 is a dummy error because this error is always -- removed from the error stack dummy_error exception; pragma exception_init(dummy_error, -6512); begin -- The following will be substituted for lsequence, lmaxdays, sdate, -- lformat, mustspfile and validate &1& old_seq := lsequence; aut_seq := lsequence; select decode(sdate, NULL, sysdate, to_date(sdate, 'MON DD YYYY HH24:MI:SS', 'NLS_CALENDAR=Gregorian')) into trgt_date from x$dual; select sysdate into start_time from x$dual; done := FALSE; abort := FALSE; dorestore := FALSE; endstatus := FALSE; isasm := FALSE; isomf := FALSE; istmplt := FALSE; aut_seq := 0; aut_date := 0; sft_date := 0; sft_seq := 0; sfthandle := to_char(NULL); outhandle := to_char(NULL); -- Pre-search for autobackups. if handle is not null then -- If the user specified a handle (in fact, a backup piece name or copy -- name), then we will skip the algorithm for searching the piece name -- and just call the validate operation. -- Because we are not sure if the handle is a piece or a copy, we will -- try both. If validateBackupPiece succeeds, then it means that the handle -- is a backup piece.
So, we will set fromcopy to FALSE. Otherwise, -- if validateDatafileCopy succeeds, then we will set fromcopy -- to TRUE. status := sys.dbms_backup_restore.validateBackupPiece( recid => 0, stamp => 0, handle => handle, set_stamp => 0, set_count => 0, pieceno => 0, params => NULL, hdl_isdisk => 0); rc := bitand(status, sys.dbms_backup_restore.validate_file_different); if (rc = 0) then outhandle := handle; fromcopy := FALSE; else status := sys.dbms_backup_restore.validateDatafileCopy( recid => 0, stamp => 0, fname => handle, dfnumber => 0, resetlogs_change => 0, creation_change => 0, checkpoint_change => 0, blksize => 0, signal => 0); rc := bitand(status, sys.dbms_backup_restore.validate_file_different); if (rc = 0) then outhandle := handle; fromcopy := TRUE; end if; end if; elsif (krmicd.getDevType = 'DISK') then if (lformat is not NULL and instr(lformat, '%') = 0) then -- Check if the format specified is an ASM diskgroup sys.dbms_backup_restore.isfileNameOMF(fname => lformat, isomf => isomf, isasm => isasm, istmplt => istmplt); end if; -- if the format is an ASM diskgroup, we need to search there first if (istmplt and isasm) then deb('raut_search', 'Searching ASM diskgroup ' || lformat); -- Look for autobackup piece in the ASM area. sys.dbms_backup_restore.searchFiles(pattern => lformat, ns => set_ns, omf => TRUE, ccf => FALSE, ftype => 'U'); found := sys.dbms_backup_restore.findAutSearchFileTable( mustspfile => mustspfile, until => date2stamp(trgt_date), fname => lhandle, year => year, month => month, day => day, sequence => tempseq, ats => ats); krmicd.writeMsg(8600, lformat); krmicd.writeMsg(8549, set_ns); if (found) then krmicd.writeMsg(8601, krmicd.getChid, lhandle, lformat); sft_date := year*10000+month*100+day; sft_seq := tempseq; sfthandle := lhandle; deb('raut_search', 'sft_date=' || sft_date || ' sft_seq=' || sft_seq); else krmicd.writeMsg(8602, krmicd.getChid, lformat); end if; end if; -- Look for autobackup piece in the recovery area in order to find -- the latest autobackup between the user-specified controlfile -- autobackup location and the recovery area. begin deb('raut_search', 'Searching recovery area ' || set_ra); sys.dbms_backup_restore.searchFiles(pattern => set_ra, ns => set_ns, omf => TRUE, ccf => FALSE, ftype => 'U'); found := sys.dbms_backup_restore.findAutSearchFileTable( mustspfile => mustspfile, until => date2stamp(trgt_date), fname => lhandle, year => year, month => month, day => day, sequence => tempseq, ats => ats); krmicd.writeMsg(8548, set_ra); krmicd.writeMsg(8549, set_ns); if (found) then krmicd.writeMsg(8546, krmicd.getChid, lhandle); tempdate := year*10000+month*100+day; if (sft_date < tempdate OR (sft_date = tempdate AND sft_seq < tempseq)) then if sfthandle is not null then deb('raut_search', 'Skipping autobackup ' || sfthandle || ' ;older than ' || lhandle); end if; sft_date := tempdate; sft_seq := tempseq; sfthandle := lhandle; deb('raut_search', 'sft_date=' || sft_date || ' sft_seq=' || sft_seq); else deb('raut_search', 'Skipping autobackup ' || lhandle || ' ;older than ' || sfthandle); end if; else krmicd.writeMsg(8547, krmicd.getChid); end if; exception when sys.dbms_backup_restore.ra_not_set then krmicd.clearErrors; end; end if; -- if not handle <<retry>> -- retry on *_not_in_bs errors from restoreBackupPiece -- To start with, assume the output handle as the search handle. When -- we retry, we should still use the search handle as one of the valid -- autobackups.
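-- Illustration (hypothetical values): with the default '%F' autobackup
-- format, genPieceName in the manual search below yields handles of the
-- form c-<dbid>-<yyyymmdd>-<seq>, e.g. c-1234567890-20110215-03 (the
-- trailing sequence is believed to be rendered in hex, 00-ff, matching
-- lsequence's 255 maximum); each day is probed from seq 255 down to 0.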
if (sfthandle is not null) then outhandle := sfthandle; end if; -- If the search was using ASM controlfile autobackup format, then there -- is no need to search non-OMF autobackups. if (istmplt and isasm) then goto raut_search_end; end if; -- If the user has specified the handle, then the autobackup search should -- not be started, so jump to the end. if (handle is not null) then goto raut_search_end; end if; -- If dbid is not set, then terminate the search with a warning. if (ldbid = 0) then krmicd.writeMsg(8550, lformat); goto raut_search_end; end if; -- Here is the search for non-OMF autobackups. begin deb('raut_search', 'Searching manually starting with day: ' || trgt_date || ' and sequence: ' || old_seq); found := FALSE; for days in 1..lmaxdays loop select to_char(trgt_date, 'YYYY', 'NLS_CALENDAR=Gregorian'), to_char(trgt_date, 'MM', 'NLS_CALENDAR=Gregorian'), to_char(trgt_date, 'DD', 'NLS_CALENDAR=Gregorian'), upper(set_ns) into lyear, lmonth, lday, uset_ns from x$dual; select sysdate into begtime from x$dual; deb('raut_search', 'Channel '||krmicd.getChid|| ' starting search for day '||lyear||lmonth||lday|| ' at '||begtime); tempdate := to_number(lyear||lmonth||lday); for tempseq in reverse 0..old_seq loop -- If we already found an autobackup in the recovery area that -- is later than this date, then abort the search. if (tempdate < sft_date OR (tempdate = sft_date AND tempseq < sft_seq)) then deb('raut_search', 'Skipping autobackup search; day ' || tempdate || ' older than ' || outhandle); found := TRUE; exit; end if; if krmicd.getAut(tempseq, lyear||lmonth||lday) = 1 then if (tempseq = old_seq) then krmicd.writeMsg(8535, krmicd.getChid, tempdate); end if; else abort := TRUE; exit; end if; lhandle := sys.dbms_backup_restore.genPieceName (pno => 0, set_count => 0, set_stamp => 0, format => lformat, copyno => 1, devtype => 'N/A', year => lyear, month => lmonth, day => lday, dbid => ldbid, ndbname => uset_ns, pdbname => NULL, cfseq => tempseq); if (mod(tempseq,10) = 0) then deb('raut_search', 'Channel '|| krmicd.getChid || ' looking for day: '|| lyear || lmonth || lday || ' sequence ' || tempseq || ' handle: '||lhandle); end if; -- check with media management or disk if the backup piece exists status := sys.dbms_backup_restore.validateBackupPiece( recid => 0, stamp => 0, handle => lhandle, set_stamp => 0, set_count => 0, pieceno => 0, params => NULL, hdl_isdisk => 0); deb('raut_search', 'status=' || status); rc := bitand(status, sys.dbms_backup_restore.validate_file_different); deb('raut_search', 'rc=' || rc); if (rc = 0) then found := TRUE; aut_date := tempdate; aut_seq := tempseq; outhandle := lhandle; krmicd.writeMsg(8536, krmicd.getChid, lhandle); if (sfthandle is not null) then deb('raut_search', 'Skipping autobackup ' || sfthandle || ' ;older than ' || outhandle); end if; exit; end if; end loop; exit when (found OR abort); select sysdate, (sysdate-begtime)*60*24*60 into endtime, seconds from x$dual; deb('raut_search', 'Channel ' || krmicd.getChid || ' ending search for day ' || lyear || lmonth || lday || ' at ' || endtime ||' (elapsed: ' || seconds || ' seconds)'); old_seq := 255; trgt_date := trgt_date - 1; end loop; end; <<raut_search_end>> if outhandle is not null then -- If the final handle is the search handle, then use its seq and date if outhandle = sfthandle then aut_seq := sft_seq; aut_date := sft_date; -- clear all search file table fields so that it won't be -- reused on retries sft_date := 0; sft_seq := 0; sfthandle := to_char(null); end if; -- Call krmicd.foundAut which saves the sequence and date
about -- this backup into job sequencer context (qx_krmicx). -- In addition, the function krmicd.foundAut will return 0 if some -- other channels have already found something better. In that case, -- we set abort to TRUE, which means that we will finish autobackup -- search in this channel. if (krmicd.foundAut(aut_seq, aut_date) = 0) then abort := TRUE; else retval := krmicd.resAut(aut_seq, aut_date); while (retval = 2) loop sys.dbms_backup_restore.sleep(5); retval := krmicd.resAut(aut_seq, aut_date); end loop; if (retval = 0) then abort := TRUE; end if; if (retval = 1) then dorestore := TRUE; end if; end if; end if; if abort then krmicd.writeMsg(8537, krmicd.getChid); else if outhandle is null then if ldbid <> 0 then krmicd.writeMsg(8538, krmicd.getChid, lmaxdays); end if; krmicd.setNotFeasible; end if; end if; -- Call endAut [ END AUTobackup search ]. -- This callback will return FALSE if there are no more channels to -- process and we haven't found an autobackup. -- Additionally, the function resets all autobackup related flags for -- this channel. endstatus := krmicd.endAut; if preview then if not endstatus then krmicd.writeMsg(6172); krmicd.clearErrors; end if; return; end if; if not endstatus then -- Raise a dummy error (ORA-6512). This error will be removed -- from the error stack. We need to raise something because we need to -- tell RMAN that something went wrong. raise dummy_error; end if; if vheader then return; end if; >>> # skeleton to restore controlfile from autobackup define raut_cf <<< -- raut_cf -- The following will be substituted for cfname, currcf, isstby, rsid, -- and rsts. &1& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); -- setup trgt_date properly when we retry this algorithm to choose -- an older autobackup after a restore failure. if aut_seq = 0 then trgt_date := trgt_date - 1; old_seq := 255; else old_seq := aut_seq - 1; end if; if dorestore then if not fromcopy then -- In case fromcopy is FALSE, the handle -- represents a backup piece, so we will call restoreBackupPiece. if validate then krmicd.writeMsg(8096, krmicd.getChid); elsif handle is null then krmicd.writeMsg(8553, krmicd.getChid, outhandle); else krmicd.writeMsg(8021, krmicd.getChid); end if; if (csf_retries = 0) then sys.dbms_backup_restore.restoreSetDataFile; setRestoreParams; if validate then sys.dbms_backup_restore.restorevalidate; else sys.dbms_backup_restore.restoreControlFileTo(cfname, isstby); end if; end if; begin sys.dbms_backup_restore.restoreBackupPiece(done => done, handle => outhandle, params => NULL); exception -- If restoreBackupPiece signals ORA-19695 (named exception -- scf_not_in_bs), then it means that the backupset does -- not contain a standby controlfile. So, we will try the older -- pieces. when sys.dbms_backup_restore.scf_not_in_bs then if (csf_retries = 3) or (handle is not null) then raise; end if; csf_retries := csf_retries + 1; krmicd.writeMsg(8133, krmicd.getChid); krmicd.writeMsg(8134, krmicd.getChid); krmicd.clearErrors; -- All other channels are already finished. So, we can -- go back and start searching again.
done := FALSE; abort := FALSE; dorestore := FALSE; endstatus := FALSE; outhandle := to_char(NULL); aut_seq := 0; aut_date := 0; goto retry; end; select abs(sysdate-start_time) into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); if validate then krmicd.writeMsg(8182, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); elsif handle is null then krmicd.writeMsg(8534, krmicd.getChid); else krmicd.writeMsg(8180, krmicd.getChid, to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end if; -- print all restored controlfile names if currcf and not validate then print_controlfile; end if; else -- In case fromcopy is TRUE, the handle -- represents a controlfile copy, so we will call copyControlFile. if validate then krmicd.writeMsg(8518, krmicd.getChid, outhandle); dummyscn := sys.dbms_backup_restore.scandatafilecopy( recid => recid, stamp => stamp, update_fuzziness => false, check_logical => false); else sys.dbms_backup_restore.copyControlFile(full_name => full_name, recid => recid, stamp => stamp, src_name => outhandle, dest_name => cfname); krmicd.writeMsg(8025, krmicd.getChid); end if; -- print all restored controlfile names. if currcf then print_controlfile; end if; end if; krmicd.fileRestored(ftype => rman_constant.CONTROLFILE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # skeleton to restore SPFILE from autobackup define raut_sf <<< -- raut_sf -- The following will be substituted for sfname, pfname, rsid, and rsts &1& sys.dbms_backup_restore.setRmanStatusRowId(rsid=>rsid, rsts=>rsts); -- setup trgt_date properly when we retry this algorithm to choose -- an older autobackup after a restore failure. if aut_seq = 0 then trgt_date := trgt_date - 1; old_seq := 255; else old_seq := aut_seq - 1; end if; if dorestore then krmicd.writeMsg(8554, krmicd.getChid, outhandle); -- The restore conversation is started only if this is the first pass. if (csf_retries = 0) then sys.dbms_backup_restore.restoreSetDataFile; setRestoreParams; sys.dbms_backup_restore.restoreSpFileTo(pfname => pfname, sfname => sfname); end if; begin sys.dbms_backup_restore.restoreBackupPiece(done => done, handle => outhandle, params => NULL); exception -- If restoreBackupPiece signals ORA-19687 (named exception -- spfile_not_in_bs), then it means that the backupset does -- not contain an SPFILE. So, we will try the older pieces. -- Note that "handle" is not null if the user uses "RESTORE SPFILE -- FROM 'piece_name'" - in that case we should not retry. when sys.dbms_backup_restore.spfile_not_in_bs then if (csf_retries = 3) or (handle is not null) then raise; end if; csf_retries := csf_retries + 1; krmicd.writeMsg(8117, krmicd.getChid); krmicd.writeMsg(8134, krmicd.getChid); krmicd.clearErrors; -- All other channels are already finished. So, we can -- go back and start searching again. done := FALSE; abort := FALSE; dorestore := FALSE; endstatus := FALSE; outhandle := to_char(NULL); aut_seq := 0; aut_date := 0; goto retry; end; krmicd.fileRestored(ftype => rman_constant.SPFILE, fno => 0, thread => 0, sequence => 0, resetscn => 0, resetstamp => 0); krmicd.writeMsg(8541, krmicd.getChid); end if; sys.dbms_backup_restore.setRmanStatusRowId(rsid=>0, rsts=>0); end; >>> # skeleton to refresh aged files define refresh_agf <<< -- refresh_agf begin refreshAgedFiles; end; >>> #
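# Illustration only (hypothetical file names): the raut_search/raut_cf/
# raut_sf skeletons back commands such as
#   RESTORE CONTROLFILE FROM AUTOBACKUP MAXSEQ 200 MAXDAYS 5;
#   RESTORE SPFILE TO PFILE '/tmp/initaux.ora' FROM AUTOBACKUP;
# where MAXSEQ and MAXDAYS correspond to the lsequence and lmaxdays
# variables above (defaults 255 and 7).
#
# switch_tf: rename a tempfile in controlfile.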
If tempfile doesn't exist # then add it to controlfile. This skeleton appears one time in a step. # define switch_tf <<< declare tsnum number; tsname varchar2(32); tfnum number; tfname varchar2(1024); create_time date; create_scn number; blocks number; blocksize binary_integer; rfnum number; exton boolean; isSFT boolean; maxsize number; nextsize number; missing boolean; rename boolean; begin loop exit when not krmicd.switchTempfile(tsnum, tsname, tfnum, tfname, create_time, create_scn, blocks, blocksize, rfnum, exton, isSFT, maxsize, nextsize, missing, rename); deb('switch_tf', 'channel: '|| krmicd.getChid || ' tempfile= '|| tfname || ' tsname= ' || tsname || ' blocks= ' || blocks || ' blocksize= ' || blocksize || ' create_scn= ' || to_char(create_scn) || ' maxsize= ' || maxsize || ' nextsize= ' || nextsize); sys.dbms_backup_restore.switchTempfile( tsnum => tsnum, tsname => tsname, tfnum => tfnum, tfname => tfname, create_time => create_time, create_scn => create_scn, blocks => blocks, blocksize => blocksize, rfnum => rfnum, exton => exton, isSFT => isSFT, maxsize => maxsize, nextsize => nextsize); if (missing) then krmicd.writeMsg(8184, tfname, tsname); elsif (rename) then krmicd.writeMsg(8185, to_char(tfnum), tfname); else krmicd.writeMsg(8186, tfname); end if; end loop; end; >>> define val_copies <<< -- val_copies declare objecttype number; handle varchar2(512); newname varchar2(512); recid number; stamp number; objkey1 number; objkey2 number; blksize number; ckpscn number; rlscn number; crescn number; rc number; found boolean; dummy boolean; &constants& internal_error exception; db_not_mounted exception; pragma exception_init(internal_error, -600); pragma exception_init(db_not_mounted, -1507); begin &object& loop found := FALSE; exit when not krmicd.valCopyGetNext( objecttype => objecttype, handle => handle, recid => recid, stamp => stamp, objkey1 => objkey1, objkey2 => objkey2, blksize => blksize, ckpscn => ckpscn, rlscn => rlscn, crescn => crescn, found => dummy); begin if (objecttype = krmiDC) then rc := sys.dbms_backup_restore.validateDataFileCopy( recid => recid, stamp => stamp, fname => handle, dfnumber => objkey1, resetlogs_change => rlscn, creation_change => crescn, checkpoint_change => ckpscn, blksize => blksize, signal => 0); if (bitand(rc, krmkvt_MISSING) != 0 or bitand(rc, krmkvt_MISSOK) != 0) then if (bitand(rc, krmkvt_FILE_DIFF) = 0 and bitand(rc, krmkvt_IN_USE) = 0 and bitand(rc, krmkvt_DEL_FOR_SPACE) = 0) then if (objkey1 = 0) then sys.dbms_backup_restore.inspectControlFile( fname => handle, full_name => newname, recid => recid, stamp => stamp); else sys.dbms_backup_restore.inspectDataFileCopy( fname => handle, full_name => newname, recid => recid, stamp => stamp); end if; deb('val_copies', 'new recid='||recid||' stamp='||stamp); end if; end if; elsif (objecttype = krmiPC) then rc := sys.dbms_backup_restore.proxyValOnly( recid => recid, stamp => stamp, handle => handle); elsif (objecttype = krmiRAL) then rc := sys.dbms_backup_restore.validateArchivedLog( recid => recid, stamp => stamp, fname => handle, thread => objkey1, sequence => objkey2, resetlogs_change => rlscn, first_change => crescn, blksize => blksize, signal => 0); else raise internal_error; end if; exception when internal_error then raise; when db_not_mounted then rc := 0; -- inspect should have failed but successfully validated krmicd.clearErrors; when others then rc := sys.dbms_backup_restore.validate_file_different; krmicd.writeErrMsg(1005, sqlerrm); krmicd.clearErrors; end; if (rc = 0 or rc =
sys.dbms_backup_restore.validate_record_notfound) then found := TRUE; end if; if (krmicd.valCopySetFound(found, recid, stamp)) then deb('val_copies', 'chid: ' || krmicd.getChid || ' found ' || handle); end if; end loop; end; >>> define valhdr_copies <<< -- valhdr_copies declare objecttype number; handle varchar2(512); recid number; stamp number; objkey1 number; objkey2 number; blksize number; ckpscn number; rlscn number; crescn number; found boolean; allfound boolean := TRUE; &constants& internal_error exception; pragma exception_init(internal_error, -600); begin &object& loop found := FALSE; exit when not krmicd.valCopyGetNext( objecttype => objecttype, handle => handle, recid => recid, stamp => stamp, objkey1 => objkey1, objkey2 => objkey2, blksize => blksize, ckpscn => ckpscn, rlscn => rlscn, crescn => crescn, found => found); if (not found) then allfound := FALSE; if (objecttype = krmiDC) then if (objkey1 != 0) then krmicd.writeMsg(6727, handle); else krmicd.writeMsg(6728, handle); end if; elsif (objecttype = krmiPC) then krmicd.writeMsg(8165, handle); elsif (objecttype = krmiRAL) then krmicd.writeMsg(6726, handle); else raise internal_error; end if; krmicd.setNotFeasible; end if; end loop; if (allfound) then if (objecttype = krmiDC) then krmicd.writeMsg(8166); elsif (objecttype = krmiPC) then krmicd.writeMsg(8164); elsif (objecttype = krmiRAL) then krmicd.writeMsg(6158); else raise internal_error; end if; end if; end; >>> define 'x$valhdr_pieces' <<< procedure valhdr_pieces(bskey in number) is recid number; stamp number; begin if not krmicd.valGetFound(recid, stamp) then krmicd.writeMsg(12017, bskey); krmicd.setNotFeasible; elsif (krmicd.isValAllFound) then krmicd.writeMsg(8163); end if; end; >>> define valhdr_pieces <<< begin valhdr_pieces(&object&); end; >>> define create_working_set <<< -- create_working_set declare failureList sys.dbms_ir.ir_failure_list_type; firstcall binary_integer; adviseid number; failureId number; start_time date; elapsed number; hours number; mins number; secs number; begin &object& select sysdate into start_time from x$dual; if (adviseid is null) then firstcall := 1; loop exit when not krmicd.failureGetNext(firstcall, failureId); firstcall := 0; failureList(failureList.count + 1) := failureId; deb('create_working_set', 'added failureId = ' || failureId); end loop; sys.dbms_ir.getAdviseId(failureList => failureList, adviseid => adviseid); deb('create_working_set', 'adviseid = ' || adviseid); end if; krmicd.copyAdviseId(adviseid => adviseid); sys.dbms_ir.createWorkingRepairSet(adviseid => adviseid); select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('create_working_set', 'took ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> define update_feasibility_check <<< -- update_feasibility_check declare firstcall binary_integer; adviseid number; feasible_int binary_integer; repairFeasList sys.dbms_ir.ir_repair_feasibility_list; repairFeasRec sys.dbms_ir.ir_repair_feasibility; start_time date; elapsed number; hours number; mins number; secs number; begin &object& select sysdate into start_time from x$dual; firstcall := 1; loop exit when not krmicd.repairGetNext( firstcall => firstcall, failureidx => repairFeasRec.failureidx, repairidx => repairFeasRec.repairidx, feasibility => repairFeasRec.feasibility); firstcall := 0; -- no more first call repairFeasList(repairFeasList.count + 1) := repairFeasRec; if (repairFeasRec.feasibility) then feasible_int := 1; else 
feasible_int := 0; end if; deb('update_feasibility_check', 'failureidx=' || repairFeasRec.failureidx || ' repairidx=' || repairFeasRec.repairidx || ' feasible=' || feasible_int); end loop; if (repairFeasList.count > 0) then sys.dbms_ir.updateFeasibilityAndImpact( adviseid => adviseid ,repairList => repairFeasList); end if; sys.dbms_ir.consolidateRepair(adviseid => adviseid); select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('update_feasibility_check', 'took: ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> define create_repair_script <<< -- create_repair_script declare firstcall binary_integer; filename varchar2(512); fileid number; cmdline varchar2(513); cmdscript sys.dbms_ir.ir_script_file_type; start_time date; elapsed number; hours number; mins number; secs number; begin select sysdate into start_time from x$dual; firstcall := 1; loop exit when not krmicd.scriptLineGetNext(firstcall, cmdline); if (firstcall != 0) then sys.dbms_ir.createScriptFile( fileid => fileid ,fileName => fileName); end if; deb('create_repair_script', 'cmdline=' || cmdline); firstcall := 0; cmdscript(cmdscript.count + 1) := cmdline; end loop; if (fileid is not null) then sys.dbms_ir.writeFile(fileid => fileid ,contents => cmdscript); sys.dbms_ir.closeScriptFile(fileid => fileid); krmicd.copyRepairScriptName(filename); end if; select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('create_repair_script', 'took: ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> define update_repair_option <<< -- update_repair_option declare adviseid number; optionidx number; filename varchar2(512); start_time date; elapsed number; hours number; mins number; secs number; begin &object& select sysdate into start_time from x$dual; filename := krmicd.getRepairScriptName; deb('update_repair_option', 'script_name=' || filename); sys.dbms_ir.updateRepairOption( adviseid => adviseid ,optionidx => optionidx ,scriptname => filename); select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('update_repair_option', 'took: ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> define advise_done <<< -- advise_done declare adviseid number; repairOptionList sys.dbms_ir.ir_repair_option_list; start_time date; elapsed number; hours number; mins number; secs number; begin &object& select sysdate into start_time from x$dual; sys.dbms_ir.advisedone( adviseid => adviseid ,generatedRepairs => repairOptionList); for i in 1..repairOptionList.count loop deb('advise_done', 'optionidx=' || repairOptionList(i).optionidx || ' repairid=' || repairOptionList(i).repairid); krmicd.copyRepairId( optionidx => repairOptionList(i).optionidx ,repairid => repairOptionList(i).repairid); end loop; select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('advise_done', 'took: ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> define get_repair_script <<< -- get_repair_script declare repairid number; fileid number; cmdscript sys.dbms_ir.ir_script_file_type; start_time date; elapsed number; hours number; mins number; secs number; opendb boolean := TRUE; begin &object& select sysdate into start_time from x$dual; sys.dbms_ir.openScriptFile( repairid => repairid ,fileid => fileid); sys.dbms_ir.getFile( fileid => 
fileid ,contents => cmdscript); sys.dbms_ir.closeScriptFile(fileid => fileid); for i in 1..cmdscript.count loop deb('get_repair_script', 'cmdline=' || cmdscript(i)); if (not opendb and cmdscript(i) like '%alter database open resetlogs%') then deb('get_repair_script', 'skipping open resetlogs'); else krmicd.scriptLineCopyNext(cmdscript(i)); end if; end loop; select sysdate - start_time into elapsed from x$dual; dur2time(elapsed, hours, mins, secs); deb('get_repair_script', 'took: ' || to_char(hours, 'FM09') || ':' || to_char(mins, 'FM09') || ':' || to_char(secs, 'FM09')); end; >>> #repair description define repair_desc <<< #&^desc& >>> #end of repair command define repair_command_end <<< ; >>> #add datafile numbers - next word should start in a new line define repair_add_df <<< datafile &^1& >>> #add block numbers - next word should start in a new line define repair_add_block <<< datafile &2& block &^1& >>> #split numbers into multiple lines - next word should start in a new line define repair_add_arg <<< &^1& >>> #feasibility check for restore datafile define repair_fc_restore_df <<< restore validate header preview >>> #repair script for restore datafile define repair_rs_restore_df <<< restore >>> #feasibility check for recover datafile define repair_fc_recover_df <<< recover validate header preview >>> #repair script for recover datafile define repair_rs_recover_df <<< recover >>> #feasibility check for offline datafile define repair_fc_offline_df <<< declare open_inst binary_integer; mount_inst binary_integer; total_inst binary_integer; system_tbs binary_integer; noarchivelog binary_integer; online_rw binary_integer; feasibility boolean := TRUE; type numTab_t is table of number index by binary_integer; dfnol numTab_t; begin &1& select count(case when status = 'OPEN' then 1 else 0 end), count(case when status = 'MOUNTED' then 1 else 0 end), count(*) into open_inst, mount_inst, total_inst from gv$instance; deb('fc_offline_df', 'open_inst=' || open_inst || ' mount_inst=' || mount_inst || ' total_inst=' || total_inst); if (open_inst != total_inst OR mount_inst != total_inst) then feasibility := FALSE; end if; select count(*) into noarchivelog from v$database where log_mode = 'NOARCHIVELOG'; deb('fc_offline_df', 'noarchivelog=' || noarchivelog); for i in 1..dfnol.count loop exit when not feasibility; select count(*) into system_tbs from (select fe.fenum file#, fe.fetsn ts# from x$kccfe fe where fe.fedup != 0) df, v$tablespace ts where ts.name = 'SYSTEM' and ts.ts# = df.ts# and df.file# = dfnol(i); deb('fc_offline_df', 'dfno=' || dfnol(i)); deb('fc_offline_df', 'system_tbs=' || system_tbs); if (system_tbs > 0) then feasibility := FALSE; else select count(*) into online_rw from (select fe.fenum file#, decode(fe.fetsn, 0, decode(bitand(fe.festa,2), 0, 'SYSOFF', 'SYSTEM'), decode(bitand(fe.festa,18), 0, 'OFFLINE', 2, 'ONLINE', 'RECOVER')) status, decode(fe.fedor, 2,'READ ONLY', decode(bitand(fe.festa, 12), 0, 'DISABLED', 4, 'READ ONLY', 12, 'READ WRITE', 'UNKNOWN')) enabled from x$kccfe fe where fe.fedup != 0) df where df.status = 'ONLINE' and df.enabled = 'READ WRITE' and df.file# = dfnol(i); deb('fc_offline_df', 'online_rw=' || online_rw); if (noarchivelog > 0 and online_rw > 0) then feasibility := FALSE; end if; end if; end loop; if (feasibility) then deb('fc_offline_df', 'feasibility=TRUE'); else deb('fc_offline_df', 'feasibility=FALSE'); krmicd.setNotFeasible; end if; end; >>> #repair script for alter database datafile define repair_rs_alter_df <<< sql 'alter database datafile &^1& >>> 
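# NOTE (editorial illustration): repair_rs_alter_df above is deliberately an
# unterminated fragment; it is completed by repair_rs_offline_df or
# repair_rs_online_df below. Assuming the &^1& placeholder expands to a
# datafile number such as 5, the concatenated templates would emit the RMAN
# command:
#
#   sql 'alter database datafile 5 offline';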
#repair script for offline datafile define repair_rs_offline_df <<< offline'; >>> #repair script for online datafile define repair_rs_online_df <<< online'; >>> #feasibility check for restore database define repair_fc_restore_db <<< restore database validate header preview; >>> #feasibility check for restore database until scn define repair_fc_restore_dbpitr <<< restore database validate header preview until scn &^untscn&; >>> #repair script for restore database define repair_rs_restore_db <<< restore database; >>> #repair script to reset database incarnation for dbpitr define repair_rs_reset_incarnation <<< reset database to incarnation &^1&; >>> #repair script for restore database until scn define repair_rs_restore_dbpitr <<< restore database until scn &^untscn&; >>> #feasibility check for recover database define repair_fc_recover_db <<< recover validate header preview database; >>> #feasibility check for recover database until scn define repair_fc_recover_dbpitr <<< recover validate header preview database until scn &^untscn&; >>> #repair script for recover database define repair_rs_recover_db <<< recover database; >>> #repair script for recover database define repair_rs_recover_dbpitr <<< recover database until scn &^untscn&; >>> #repair script for online redo logs define repair_rs_open_resetlogs <<< alter database open resetlogs; >>> #feasibility check for flashback database define repair_fc_flashback_db <<< flashback validate header preview database to before scn &^1&; >>> #repair script for flashback database define repair_rs_flashback_db <<< flashback database to before scn &^1&; >>> #feasibility check for block media recovery define repair_fc_bmr <<< recover validate header preview >>> #repair script for block media recovery define repair_rs_bmr <<< recover >>> #feasibility check for restore controlfile define repair_fc_restore_ctl <<< restore validate header preview controlfile&^1&; >>> #repair script for restoring controlfile in nocatalog mode define repair_rs_set_dbid <<< set dbid&^1&; >>> #repair script for restore controlfile define repair_rs_restore_ctl <<< restore controlfile&^1&; sql 'alter database mount'; >>> #repair script to restart database define repair_rs_restart_db <<< shutdown; startup nomount; >>> #feasibility check for replicate controlfile define repair_fc_replicate_ctl <<< declare feasibility boolean := FALSE; cfname v$controlfile.name%type; begin for i in 1..9999 loop cfname := sys.dbms_backup_restore.getparm( sys.dbms_backup_restore.control_file, i); exit when cfname is null; feasibility := sys.dbms_ir.controlfilecheck(cfname => cfname); if (feasibility) then deb('repair_fc_replicate_ctl', cfname || ' feasible'); else deb('repair_fc_replicate_ctl', cfname || ' not feasible'); end if; exit when feasibility; end loop; if (feasibility) then krmicd.copyReplicateCtlFrom(cfname => cfname); else krmicd.setNotFeasible; end if; end; >>> #repair script for replicate controlfile define repair_rs_replicate_ctl <<< restore controlfile from&^1&; sql 'alter database mount'; >>> #feasibility check for non-rman repair define repair_fc_sqlscript <<< declare repairtype number; feasibility boolean; dataloss number; repairtime number; parameterlist varchar2(2048); impact varchar2(256); begin -- repairtype and parameterlist &1& sys.dbms_ir.getFeasibilityAndImpact( repairtype => repairtype ,parameterlist => parameterlist ,feasibility => feasibility ,dataloss => dataloss ,repairtime => repairtime ,impact => impact); if (feasibility) then deb('fc_sqlscript', 'feasibility=TRUE'); 
else deb('fc_sqlscript', 'feasibility=FALSE'); krmicd.setNotFeasible; end if; end; >>> #repair script for non-rman repair define repair_rs_sqlscript <<< sql "begin sys.dbms_ir.execsqlscript(filename =>&1&); end;"; >>> #feasibility script for forcing open resetlogs define repair_fc_force_openreset <<< declare repairtype number; feasibility boolean; dataloss number; repairtime number; parameterlist varchar2(2048); impact varchar2(256); begin -- repairtype and parameterlist &1& sys.dbms_ir.getFeasibilityAndImpact( repairtype => repairtype ,parameterlist => null ,feasibility => feasibility ,dataloss => dataloss ,repairtime => repairtime ,impact => impact); if (feasibility) then deb('fc_force_openreset', 'feasibility=TRUE'); else deb('fc_force_openreset', 'feasibility=FALSE'); krmicd.setNotFeasible; end if; end; >>> #repair script for forcing open resetlogs define repair_rs_force_openreset <<< sql 'alter database recover database until cancel'; alter database open resetlogs; >>> #STOPSTOP <--- this is where krmk.pc stops parsing during initialization # ######################################################################### # Create Recovery Catalog # # # # If new members are added here, the static array krmkcre_libunits must # # be updated with the new member names. # # # # The reason that the views here are created with 'create or replace' # # rather than just 'create' is that some of these view libmems are also # # used to upgrade the recovery catalog, and we always want to create the # # most current view. # # # # ######################################################################### # NAME # catrman.sql # DESCRIPTION # Create Recovery Catalog tables and views # # Recovery Catalog MUST NOT be created under SYS (INTERNAL). # The owner must be granted RECOVERY_CATALOG_OWNER role or corresponding # privileges and sufficient quota on the default tablespace. # # create user rman identified by xxxxxx # default tablespace tbsnnn quota unlimited on tbsnnn; # grant recovery_catalog_owner to rman; # connect rman/xxxxxx # @catrman # # Accessing the recovery catalog tables directly is not supported. # # NOTES # # Remember to update catnormn.sql when adding new tables or views # # MODIFIED (MM/DD/YY) # swerthei 06/03/98 - add media_pool to physical media tables # swerthei 05/16/98 - add proxy copy tables and views # fsanchez 03/28/98 - Duplexed backup sets # dbeusee 04/06/98 - xcheck enh. # dbeusee 03/13/98 - Fix bug 624305 (don't harm existing catalog). # gpongrac 01/20/98 - allow nulls in dfatt for fname and blocks # swerthei 11/11/97 - bug 585165: make df.read_only NOT NULL # swerthei 08/18/97 - add stop_change#, read_only columns to rc_datafil # gpongrac 07/01/97 - track offline clean and read only scn # tpystyne 04/26/97 - add ts_u2 constraint # dalpern 04/16/97 - renamed prvtrman to prvtrmnu - "user" v. 
"sys" # gpongrac 03/31/97 - fix typo # gpongrac 03/31/97 - change comments about ckp.cf_create_time # gpongrac 03/31/97 - change comments about dbinc.cf_create_time # gpongrac 03/31/97 - change index on offr # gpongrac 03/31/97 - add cf_create_time to offr # tpystyne 03/20/97 - bug 465098: remove comments that choke sqlplus # gpongrac 03/05/97 - add backup set recid and stamp to rc_backup_dataf # gpongrac 02/20/97 - add completion time to bdf and rc_backup_datafile # tpystyne 01/10/97 - allow null next_time in al # swerthei 12/10/96 - add backup piece/set time columns # tpystyne 12/16/96 - move clone_fname to df # tpystyne 12/13/96 - add parent_dbinc_key # tpystyne 12/09/96 - code review changes # gpongrac 11/15/96 - add comments about ckp_u1 constraint # gpongrac 11/12/96 - add ckp_type to ckp_u1 constraint # gpongrac 11/08/96 - change ckp_u1 # swerthei 11/08/96 - remove NOT NULL constraint from ts.create_time # tpystyne 11/11/96 - add stored script views # gpongrac 11/04/96 - add high_df_recid to ckp table # gpongrac 11/01/96 - keep low and next times in al # swerthei 10/31/96 - remove df.unrecoverable_scn # gpongrac 10/24/96 - remove bloblocksize from rc_log_history # swerthei 10/23/96 - add cdf.completion_time, al.completion_time # gpongrac 10/23/96 - move blocksize to al table # tpystyne 10/21/96 - change JOB to PARTIAL # tpystyne 10/20/96 - make device_type not null # tpystyne 10/13/96 - fix rc_ views # gpongrac 10/07/96 - have rc_datafile return blocksize # tpystyne 10/03/96 - add next_scn to al # tpystyne 09/05/96 - add online redo log support # gpongrac 08/22/96 - use @@ not @ # tpystyne 08/26/96 - add ckp_time to ccf and bcf # gpongrac 08/16/96 - add unique constraint to scr table # gpongrac 08/13/96 - stored script support # tpystyne 07/24/96 - add incr_level to cdf # bhomsi 07/16/96 - A new ref log needed for tkrmrman.tsc if update # gpongrac 07/03/96 - install the packages too # tpystyne 07/05/96 - remove bcf_offr and ccf_offr # tpystyne 07/01/96 - implement offline range resync # tpystyne 06/19/96 - remove unused columns from rc_tablespace # tpystyne 05/30/96 - allow null dates # tpystyne 05/20/96 - add indexes to tune dbms_rcvman # tpystyne 04/26/96 - allow duplicate thread# and sequence# numbers # tpystyne 03/14/96 - restructure primary and foreign keys # tpystyne 02/20/96 - fix views again # tpystyne 02/12/96 - add fname_hash columns # tpystyne 01/31/96 - update for set_stamp and set_count # tpystyne 01/18/96 - fix view definitions # gpongrac 01/10/96 - drop domain col from dbinc table # tpystyne 12/06/95 - more fixes # tpystyne 12/01/95 - remove incremental backup SCNs # tpystyne 11/14/95 - add new tables # tpystyne 11/01/95 - incorporate design changes # tpystyne 10/18/95 - add rt and rlh tables # tpystyne 10/13/95 - add rcvcat views # tpystyne 10/09/95 - name constraints # tpystyne 10/03/95 - fix backup set tables # tpystyne 10/02/95 - replace rbs with tsatt # gpongrac 09/26/95 - move device_type from bs to bp # tpystyne 09/26/95 - add db_key, recid and stamp columns to bs # - add unique keys # gpongrac 09/25/95 - add block count columns to bdf and brl # gpongrac 09/25/95 - add devtype column to bs table # gpongrac 09/22/95 - change col type for ts.ts_name from char(30) to v # gpongrac 09/21/95 - add creation SCN and recovery SCN to table ts # - change offr and bdf tables too # tpystyne 09/20/95 - incorporate design changes # gpongrac 08/08/95 - add rbs (rollback segments and stuff) # gpongrac 07/17/95 - merge # gpongrac 07/17/95 - deal with offline clean and readonly 
#                      correctly
# gpongrac   07/17/95 - merge
# gpongrac   06/07/95 - recovery catalog DDL
# gpongrac   04/14/95 - Created
# The character set of the recovery catalog database should be a superset of
# all target database character sets. This is important if tablespace
# or tag names contain non-ASCII characters.
# The DB table contains all target databases that have been registered in
# the recovery catalog. The unique key constraint on db_id prevents clients
# from registering the same database twice.
define db <<<
CREATE TABLE db
(
  db_key           NUMBER NOT NULL,  -- sequence generated primary key
  db_id            NUMBER NOT NULL,  -- kccfhdbi from controlfile
  -- updatable columns
  curr_dbinc_key   NUMBER,           -- current incarnation
  CONSTRAINT db_p PRIMARY KEY (db_key),  -- db_key is primary key
  CONSTRAINT db_u1 UNIQUE(db_id)         -- ensure that db_id is unique
) &tablespace&
>>>
define db_insert_trigger <<<
create or replace trigger db_insert_trigger before insert on db
for each row
declare
  is_owner number;
  is_auth  number;
  can_add  number;
begin
  -- In the following statement, user_users.username resolves to the owner
  -- of this trigger (i.e. the base recovery catalog owner), and user
  -- resolves to the currently logged-in user. So this tells us whether
  -- the currently logged-in user is also the recovery catalog owner.
  select count(*) into is_owner from user_users where username=user;
  if is_owner > 0 then
    return;
  end if;
  select count(*) into is_auth from vpc_databases
   where filter_uid = uid and db_id = :new.db_id;
  if is_auth > 0 then
    return;
  end if;
  select count(*) into can_add from vpc_users
   where filter_uid = uid and add_new_db = 'Y';
  if can_add = 0 then
    raise_application_error(num => -20012,
                            msg => 'not authorized to add new database');
  end if;
  -- At this point, we have a VPC user who has been granted the
  -- REGISTER DATABASE privilege, but does not already have CATALOG
  -- privilege for the database that is being registered. We now add
  -- a privilege row to implicitly grant this user the CATALOG privilege
  -- for the new database that they are registering.
  insert into vpc_databases(filter_user, filter_uid, db_id)
  values(user, uid, :new.db_id);
end;
>>>
# primary key generator
define rman_seq <<<
CREATE SEQUENCE rman_seq
>>>
# The CONF table contains the RMAN configuration of the target
# database registered in this recovery catalog.
# The foreign key is db_key. In other words, we have one set of RMAN
# configuration settings per database.
define conf <<<
CREATE TABLE conf
(
  db_key          NUMBER NOT NULL,   -- db to which this configuration
                                     -- belongs
  conf#           NUMBER NOT NULL,   -- configuration number
                                     -- (row number in cf)
  name            VARCHAR2(65) NOT NULL,  -- configuration name
  value           VARCHAR2(1025),    -- configuration value
  -- The db_unique_name keeps track of which site (database in the standby
  -- configuration) this RMAN configuration belongs to. If db_unique_name is
  -- NULL, then the configuration is generic and should be used
  -- by all sites.
  db_unique_name  VARCHAR2(512) DEFAULT NULL,
  site_key        NUMBER DEFAULT 0 NOT NULL,
  -- When we upgrade the catalog to 10i, this cleanup column is set to 'YES'.
  -- This means that on the very first call to resetConfig2() we will
  -- wipe out all configurations, regardless of whether they are site specific
  -- or generic. This will force a full resync from the cf to the recovery
  -- catalog, which will create the correct information. After that, the
  -- cleanup column will remain 'NO'.
  cleanup         VARCHAR2(3) DEFAULT 'YES',
  CONSTRAINT conf_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE
) &tablespace&
>>>
define conf_i_db <<<
CREATE INDEX conf_i_db on conf(db_key) &tablespace&
>>>
# The node table is used to track the latest recid we saw for a particular
# db_unique_name. The point is that each site (database in a standby
# configuration) has a different db_unique_name, so there will be one
# row for each site (database in the standby configuration).
# Ideally, we should have named this table site, but it is too much work
# to change it now...
define 'node' <<<
CREATE TABLE node
(
  db_unique_name      VARCHAR2(512),            -- Instance site name
  db_key              NUMBER NOT NULL,
  high_conf_recid     NUMBER DEFAULT 0 NOT NULL, -- last configuration recid seen
  force_resync2cf     VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                                -- force resync to controlfile
  high_rout_stamp     NUMBER default 0,
  inst_startup_stamp  NUMBER default 0,
  -- current role of this site. This can be 'PRIMARY' or 'STANDBY'. This is
  -- to prevent maintaining multiple configurations for the PRIMARY database
  -- role. Note that there are standby failover scenarios where the node table
  -- can have multiple entries for the PRIMARY role: one for the new primary
  -- (the failed-over standby) and another for the old primary.
  database_role       VARCHAR2(7) DEFAULT 'PRIMARY' NOT NULL,
  site_key            NUMBER DEFAULT 0 NOT NULL,
  last_kccdivts       NUMBER DEFAULT 0,         -- for incarnation resync
  high_ic_recid       NUMBER DEFAULT 0,         -- recid based incarnation resync
  cf_create_time      DATE,                     -- cf version_time at last resync
                                                -- This was controlfile_created in
                                                -- 8.0.2, now is version_time.
  dbinc_key           NUMBER DEFAULT 0 NOT NULL, -- incarnation of ckp_scn
  ckp_scn             NUMBER DEFAULT 0 NOT NULL, -- cf ckp scn at last full resync
  full_ckp_cf_seq     NUMBER DEFAULT 0 NOT NULL, -- cf seq at last full resync
  job_ckp_cf_seq      NUMBER DEFAULT 0 NOT NULL, -- cf seq at last partial resync
  high_ts_recid       NUMBER,                   -- tablespace recid
  high_df_recid       NUMBER,                   -- datafile recid
  high_rt_recid       NUMBER,                   -- redo thread recid
  high_orl_recid      NUMBER,                   -- online redo log recid
  high_offr_recid     NUMBER DEFAULT 0 NOT NULL, -- offline range (kkor) recid
  high_rlh_recid      NUMBER DEFAULT 0 NOT NULL, -- log history (kcclh) recid
  high_al_recid       NUMBER DEFAULT 0 NOT NULL, -- archived log (kccal) recid
  high_bs_recid       NUMBER DEFAULT 0 NOT NULL, -- backup set (kccbs) recid
  high_bp_recid       NUMBER DEFAULT 0 NOT NULL, -- backup piece (kccbp) recid
  high_bdf_recid      NUMBER DEFAULT 0 NOT NULL, -- backup datafile (kccbf) recid
  high_cdf_recid      NUMBER DEFAULT 0 NOT NULL, -- datafile copy (kccdc) recid
  high_brl_recid      NUMBER DEFAULT 0 NOT NULL, -- backup redo log (kccbl) recid
  high_bcb_recid      NUMBER DEFAULT 0 NOT NULL, -- backup datafile corruption recid
  high_ccb_recid      NUMBER DEFAULT 0 NOT NULL, -- datafile copy corruption recid
  high_do_recid       NUMBER DEFAULT 0 NOT NULL, -- deleted object recid
  high_pc_recid       NUMBER DEFAULT 0 NOT NULL, -- proxy copy (kccpc) recid
  high_bsf_recid      NUMBER DEFAULT 0 NOT NULL, -- backup SPFILE (kccbi) recid
  high_rsr_recid      NUMBER DEFAULT 0 NOT NULL, -- RMAN status (kccrsr) recid
  high_tf_recid       NUMBER DEFAULT 0 NOT NULL, -- tempfile recid
  high_grsp_recid     NUMBER DEFAULT 0 NOT NULL, -- guaranteed restore point recid
  high_nrsp_recid     NUMBER DEFAULT 0 NOT NULL, -- normal restore point recid
  high_bcr_recid      NUMBER DEFAULT 0 NOT NULL, -- high blk crpt (kccblkcor) recid
  low_bcr_recid       NUMBER DEFAULT 0 NOT NULL, -- low blk crpt (kccblkcor) recid
  bcr_in_use          VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                                -- is x$kccblkcor present
  CONSTRAINT node_p PRIMARY KEY (site_key),
  CONSTRAINT
  check_site_key CHECK (site_key > 0),
  CONSTRAINT node_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE,
  CONSTRAINT node_u1 UNIQUE (db_key, db_unique_name)
) &tablespace&
>>>
# the DBINC table contains all incarnations of the target databases registered
# in this recovery catalog. The unique key constraint on db_key, reset_scn and
# reset_time prevents clients from registering the same incarnation twice.
#
# The high recid columns for circular-reuse type records keep track of the
# records propagated from controlfile to recovery catalog. Only new records
# with recids higher than the corresponding high recid need to be propagated
# by the next resync.
define dbinc <<<
CREATE TABLE dbinc
(
  dbinc_key         NUMBER NOT NULL,      -- sequence generated primary key
  db_key            NUMBER NOT NULL,      -- database to which this incarnation
                                          -- belongs
  db_name           VARCHAR2(8) NOT NULL, -- current db_name
  reset_scn         NUMBER NOT NULL,      -- SCN of last resetlogs
  reset_time        DATE NOT NULL,        -- timestamp of last resetlogs
  parent_dbinc_key  NUMBER,               -- parent incarnation
  -- updatable columns
  dbinc_status      VARCHAR2(8) DEFAULT 'ORPHAN' NOT NULL,
  CONSTRAINT dbinc_status CHECK(dbinc_status in ('CURRENT', 'PARENT', 'ORPHAN')),
  CONSTRAINT dbinc_p PRIMARY KEY(dbinc_key),
  CONSTRAINT dbinc_u1 UNIQUE (db_key, reset_scn, reset_time),
  CONSTRAINT dbinc_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE
) &tablespace&
>>>
# ensure that the current database incarnation exists in dbinc
define db_f1 <<<
ALTER TABLE db ADD CONSTRAINT db_f1 FOREIGN KEY(curr_dbinc_key) REFERENCES dbinc
>>>
# the CKP table records all recovery catalog checkpoints.
# Every complete recovery catalog resync will take a snapshot of the target
# database controlfile and resync the recovery catalog from the snapshot
# control file. A checkpoint record is inserted into the CKP to indicate that
# a full resync was done. The unique constraint ckp_u1
# ensures that the controlfile changes between two resyncs.
# N.B. If you code any new uses for ckp, or change existing uses, make sure
# that the code in dbms_rcvcat.cleanupCKP does not delete the rows you need.
define ckp <<<
CREATE TABLE ckp
(
  ckp_key         NUMBER NOT NULL,      -- primary key
  ckp_scn         NUMBER NOT NULL,      -- controlfile checkpoint scn
  ckp_time        DATE,                 -- controlfile checkpoint timestamp
  ckp_cf_seq      NUMBER NOT NULL,      -- controlfile sequence
  cf_create_time  DATE NOT NULL,        -- controlfile version_time
                                        -- This was controlfile_created in
                                        -- 8.0.2, now is version_time.
  dbinc_key       NUMBER NOT NULL,      -- database incarnation
  ckp_type        VARCHAR2(7) NOT NULL, -- resync type, 'FULL' or 'PARTIAL'
  ckp_db_status   VARCHAR2(7),          -- 'OPEN' or 'MOUNTED'
  resync_time     DATE NOT NULL,        -- resync time
  site_key        NUMBER DEFAULT 0 NOT NULL,
  CONSTRAINT ckp_p PRIMARY KEY (ckp_key),
  -- If you change ckp_u1, then the query in dbms_rcvman.getCheckpoint
  -- may need revisions to its where clause. That query must always
  -- be a single-row query.
  CONSTRAINT ckp_u1 UNIQUE (dbinc_key, ckp_scn, ckp_type, ckp_cf_seq,
                            cf_create_time),
  CONSTRAINT ckp_f1 FOREIGN KEY (dbinc_key)  -- checkpoint belongs to a dbinc
    REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT ckp_f3 FOREIGN KEY (site_key)   -- checkpoint belongs to a site
    REFERENCES node ON DELETE CASCADE,
  CONSTRAINT ckp_c_type CHECK (ckp_type in ('FULL', 'PARTIAL'))
) &tablespace&
>>>
# the TS table contains all tablespaces of all database incarnations.
# Note that the same tablespace ts# and name may exist multiple times in
# a database incarnation. This will happen if a tablespace is dropped and
# recreated.
# The unique key constraint on dbinc_key, ts# and drop_scn
# ensures that only one of them is current (drop_scn is null).
# The drop_scn is calculated when a resync notices that the tablespace
# is dropped. If a new tablespace with the same ts# is seen then the drop_scn
# is set to the create_scn of the tablespace. Otherwise the drop_scn
# is set to the ckp_scn. Note that the tablespace never exists at drop_scn,
# it is always dropped before the drop_scn and drop_time.
# Note that the create_time is null for offline tablespaces after
# creating the controlfile.
define ts <<<
CREATE TABLE ts
(
  dbinc_key     NUMBER NOT NULL,           -- database incarnation
  ts#           NUMBER NOT NULL,           -- tablespace id in target db
  ts_name       VARCHAR2(30) NOT NULL,     -- tablespace name
  create_scn    NUMBER NOT NULL,           -- creation SCN (from 1st datafile)
  create_time   DATE,                      -- creation time
  plugin_scn    NUMBER DEFAULT 0 NOT NULL, -- plugin SCN
  bigfile       VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                           -- is this a bigfile tablespace?
                                           -- 'YES' - bigfile
                                           -- 'NO'  - smallfile
  temporary     VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                           -- is this a temporary tablespace?
                                           -- 'YES' - locally managed temp ts
                                           -- 'NO'  - not a temp ts
  -- updatable columns
  drop_scn      NUMBER,                    -- drop SCN (as calculated)
                                           -- NULL if tablespace exists
  drop_time     DATE,                      -- drop time
  included_in_database_backup VARCHAR2(3) DEFAULT 'YES' NOT NULL,
                                           -- is it included in db backup?
                                           -- 'YES' - included
                                           -- 'NO'  - not included
  encrypt_in_backup VARCHAR2(3),           -- is it encrypted in backup?
                                           -- 'ON'  - encrypted
                                           -- 'OFF' - not encrypted
                                           -- NULL  - user database conf
  CONSTRAINT ts_p1 PRIMARY KEY (dbinc_key, ts#, create_scn, plugin_scn),
  CONSTRAINT ts_u1 UNIQUE (dbinc_key, ts#, drop_scn),
  CONSTRAINT ts_u3 UNIQUE (dbinc_key, ts_name, create_scn, plugin_scn),
  CONSTRAINT ts_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE
) &tablespace&
>>>
# tablespace attributes that change over time
define tsatt <<<
CREATE TABLE tsatt
(
  dbinc_key      NUMBER NOT NULL,           -- database incarnation
  ts#            NUMBER NOT NULL,           -- tablespace id in target db
  create_scn     NUMBER NOT NULL,           -- creation SCN
  plugin_scn     NUMBER DEFAULT 0 NOT NULL, -- plugin SCN
  start_ckp_key  NUMBER NOT NULL,           -- ckp when first observed
  end_ckp_key    NUMBER,                    -- ckp when changed; NULL->current
  rbs_count      NUMBER,                    -- number of rollback segs in this ts
                                            -- NULL -> unknown
  CONSTRAINT tsatt_u2 UNIQUE (dbinc_key, ts#, create_scn, plugin_scn,end_ckp_key),
  CONSTRAINT tsatt_f4 FOREIGN KEY (dbinc_key, ts#, create_scn, plugin_scn)
    REFERENCES ts ON DELETE CASCADE INITIALLY DEFERRED,
  CONSTRAINT tsatt_f2 FOREIGN KEY (start_ckp_key) REFERENCES ckp,
  CONSTRAINT tsatt_f3 FOREIGN KEY (end_ckp_key) REFERENCES ckp
) &tablespace&
>>>
# the DF table contains all datafiles of all database incarnations.
# Note that the same datafile file# may exist multiple times in a database
# incarnation. This may happen if the datafile is dropped (with its
# tablespace) and a new datafile gets the old file#. The unique key on
# ts_key and file# ensures that the same file# is not used for the same
# tablespace (since datafiles can only be dropped with their tablespaces).
# The file# and create_scn combination must also be unique for a database
# incarnation.
# The drop_scn is calculated when a resync notices that a datafile has
# disappeared from the controlfile of the target database. If a new file
# with the same file# (and higher create_scn) is seen then the drop_scn is
# set to the create_scn of the new file. Otherwise the drop_scn
# is set to the ckp_scn.
# This calculation guarantees that two datafiles
# with the same file# never appear to have existed at the same point-in-time
# (or SCN). This is important since the query "which datafiles existed at
# SCN X" should never return two files with the same file#.
define df <<<
CREATE TABLE df
(
  dbinc_key           NUMBER NOT NULL,  -- database incarnation
  file#               NUMBER NOT NULL,  -- database file number
  create_scn          NUMBER NOT NULL,  -- creation SCN
  create_time         DATE,             -- creation timestamp
  ts#                 NUMBER NOT NULL,  -- tablespace id in target db
  ts_create_scn       NUMBER NOT NULL,  -- tablespace creation SCN
  block_size          NUMBER NOT NULL,  -- blocksize
  -- updatable columns
  clone_fname         VARCHAR2(1024),   -- clone datafile name (alias aux_name)
  drop_scn            NUMBER,           -- drop SCN (as calculated)
                                        -- NULL if datafile exists
  drop_time           DATE,             -- drop time
  stop_scn            NUMBER,           -- offline clean or read only scn
  stop_time           DATE,             -- timestamp for above SCN
  read_only           NUMBER NOT NULL,  -- 1 if stop_scn is read only, else 0
  rfile#              NUMBER,           -- tablespace relative file number
  df_key              NUMBER NOT NULL,
  blocks              NUMBER,           -- size of file
  foreign_dbid        NUMBER DEFAULT 0 NOT NULL,
  foreign_create_scn  NUMBER DEFAULT 0 NOT NULL,
  foreign_create_time DATE,
  plugged_readonly    VARCHAR2(3) DEFAULT 'NO' NOT NULL,
  plugin_scn          NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_scn    NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_time   DATE,
  create_thread       NUMBER,           -- null means unknown, 0 not recreatable
  create_size         NUMBER,           -- null means unknown, 0 not recreatable
  CONSTRAINT df_p PRIMARY KEY (dbinc_key, file#, create_scn, plugin_scn),
  CONSTRAINT df_u1 UNIQUE (dbinc_key, file#, drop_scn),
  -- CONSTRAINT df_u2 UNIQUE (dbinc_key, ts#, ts_create_scn, file#),
  -- file belongs to a tablespace
  -- bug 9971106: df_f1 replaced with df_f2
  CONSTRAINT df_f2 FOREIGN KEY (dbinc_key, ts#, ts_create_scn, plugin_scn)
    REFERENCES ts ON DELETE CASCADE
) &tablespace&
>>>
# datafile attributes at site
define site_dfatt <<<
CREATE TABLE site_dfatt
(
  fname      VARCHAR2(1024),   -- datafile name
  df_key     NUMBER NOT NULL,
  site_key   NUMBER NOT NULL,
  CONSTRAINT site_dfatt_p PRIMARY KEY (df_key, site_key),
  --CONSTRAINT site_dfatt_f1 FOREIGN KEY (df_key)
  --  REFERENCES df,
  CONSTRAINT site_dfatt_f2 FOREIGN KEY (site_key)
    REFERENCES node ON DELETE CASCADE
) &tablespace&
>>>
# The OFFR table stores datafile offline ranges.
# Note that datafile offline ranges are stored in two places in the control
# file. The most recent offline range (if any) of each datafile is kept in the
# datafile record itself and previous ranges are stored in the offline range
# records. In the recovery catalog all offline ranges are recorded in the
# OFFR table.
# The resync will first read datafile records and resync the most
# recent offline ranges. Note that the offr_recid and offr_stamp are null
# until the offline range is moved to the offline range record.
# Then it will read all offline range records with recid higher than the
# highwater mark and resync them. If the offline range is already in the
# recovery catalog, its recid and stamp are updated in case they are null.
# The unique key on dbinc_key, file# and offline_scn ensures that the same
# offline range can't be entered twice. An offline range belongs to
# the database incarnation that was current at the end (online scn)
# of the range.
# In order to allow multiple TSPITRs to the same point in time, create_scn is
# added to the unique index; otherwise we would get duplicate values in the
# index and RMAN-20087 (bug 607271).
define offr <<<
CREATE TABLE offr
(
  offr_key        NUMBER NOT NULL,
  dbinc_key       NUMBER NOT NULL, -- database incarnation
  offr_recid      NUMBER,          -- offline range recid
  offr_stamp      NUMBER,          -- offline range stamp
  file#           NUMBER NOT NULL, -- datafile number
  create_scn      NUMBER NOT NULL, -- datafile creation scn
  offline_scn     NUMBER NOT NULL, -- scn at which datafile was taken offline
  online_scn      NUMBER NOT NULL, -- online checkpoint SCN
  online_time     DATE NOT NULL,   -- online checkpoint time
  cf_create_time  DATE,            -- controlfile creation time
  CONSTRAINT offr_p PRIMARY KEY (offr_key),
  CONSTRAINT offr_u2 UNIQUE (dbinc_key, file#, create_scn, offline_scn,
                             cf_create_time),
  CONSTRAINT offr_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE
) &tablespace&
>>>
# the TF table contains all tempfiles of all database incarnations.
# Note that the same tempfile file# may exist multiple times in a database
# incarnation. This may happen if the tempfile is dropped (with its
# tablespace) and a new tempfile gets the old file#. The unique key on
# ts_key and file# ensures that the same file# is not used for the same
# tablespace (since tempfiles can only be dropped with their tablespaces).
# The file# and create_scn combination must also be unique for a database
# incarnation.
define tf <<<
CREATE TABLE tf
(
  dbinc_key      NUMBER NOT NULL,           -- database incarnation
  file#          NUMBER NOT NULL,           -- tempfile number
  create_scn     NUMBER NOT NULL,           -- creation SCN
  create_time    DATE,                      -- creation timestamp
  ts#            NUMBER NOT NULL,           -- tablespace id in target db
  ts_create_scn  NUMBER NOT NULL,           -- tablespace creation SCN
  plugin_scn     NUMBER DEFAULT 0 NOT NULL, -- always zero for temp files
                                            -- required for foreign constraint
  block_size     NUMBER NOT NULL,           -- blocksize
  rfile#         NUMBER,                    -- tablespace relative file number
  tf_key         NUMBER NOT NULL,
  CONSTRAINT tf_p PRIMARY KEY (dbinc_key, file#, create_scn)
  -- CONSTRAINT tf_u2 UNIQUE (dbinc_key, ts#, ts_create_scn, file#),
  -- file belongs to a tablespace
  -- bug 9971106: tf_f1 replaced with tf_f2
  -- The constraints tf_f2 and tf_c1_plugin_scn are added during create catalog
  -- using separate SQL defined in libmems. This is done so that upgrade
  -- from 9.2 works seamlessly when creating the TF table.
  -- CONSTRAINT tf_f2 FOREIGN KEY (dbinc_key, ts#, ts_create_scn, plugin_scn)
  --   REFERENCES ts ON DELETE CASCADE,
  -- CONSTRAINT tf_c1_plugin_scn CHECK (plugin_scn = 0)
) &tablespace&
>>>
# tempfile attributes at site
# The drop_scn is calculated when a resync notices that a tempfile has
# disappeared from the controlfile of the target database. If a new file
# with the same file# (and higher create_scn) is seen then the drop_scn is
# set to the create_scn of the new file. Otherwise the drop_scn
# is set to the ckp_scn. This calculation guarantees that two tempfiles
# with the same file# never appear to have existed at the same point-in-time
# (or SCN). This is important since the query "which tempfiles existed at
# SCN X" should never return two files with the same file#.
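# Editorial illustration (hypothetical query, not part of the catalog
# scripts): under the drop_scn rules above, a "which tempfiles existed at
# SCN X" query of the following shape returns at most one row per file#,
# because a dropped tempfile's lifetime never overlaps the create_scn of its
# successor (the bind variables :dbinc_key, :site_key and :scn are assumed):
#
#   select tf.file#, sa.fname
#     from tf, site_tfatt sa
#    where tf.tf_key     = sa.tf_key
#      and sa.site_key   = :site_key
#      and tf.dbinc_key  = :dbinc_key
#      and tf.create_scn <= :scn
#      and (sa.drop_scn is null or sa.drop_scn > :scn);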
define site_tfatt <<<
CREATE TABLE site_tfatt
(
  fname       VARCHAR2(1024),  -- tempfile name
  tf_key      NUMBER NOT NULL,
  site_key    NUMBER NOT NULL,
  drop_scn    NUMBER,          -- drop SCN (as calculated)
                               -- NULL if tempfile exists
  drop_time   DATE,            -- drop time
  blocks      NUMBER,          -- size of file in blocks
  autoextend  VARCHAR2(3),
  max_size    NUMBER,
  next_size   NUMBER,
  CONSTRAINT site_tfatt_p PRIMARY KEY (tf_key, site_key),
  --CONSTRAINT site_tfatt_f1 FOREIGN KEY (tf_key)
  --  REFERENCES tf,
  CONSTRAINT site_tfatt_f2 FOREIGN KEY (site_key)
    REFERENCES node ON DELETE CASCADE
) &tablespace&
>>>
# The RR table contains redo ranges for all database incarnations.
# A redo range is a continuous sequence of redo log history records
# propagated to the recovery catalog. A new redo range is started when
# recovery catalog resync detects a gap in redo log history. This will
# happen when the control file is recreated or when a redo history record
# is overwritten in the controlfile before it is propagated to the
# recovery catalog.
define rr <<<
CREATE TABLE rr
(
  rr_key     NUMBER NOT NULL,
  dbinc_key  NUMBER NOT NULL, -- database incarnation
  low_scn    NUMBER NOT NULL, -- low SCN of the range
  high_scn   NUMBER NOT NULL, -- high SCN of the range
  CONSTRAINT rr_p PRIMARY KEY (rr_key),
  CONSTRAINT rr_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE
) &tablespace&
>>>
# the RT table contains all redo threads for all database incarnations.
# Although threads cannot be dropped, they may appear dropped if the
# controlfile of the target database is recreated.
define rt <<<
CREATE TABLE rt
(
  dbinc_key     NUMBER NOT NULL,      -- database incarnation
  thread#       NUMBER NOT NULL,      -- thread number
  -- updatable columns
  sequence#     NUMBER NOT NULL,      -- last log sequence number allocated
  enable_scn    NUMBER,               -- SCN of last enable
  enable_time   DATE,                 -- timestamp of last enable
  disable_scn   NUMBER,               -- SCN of last disable
  disable_time  DATE,                 -- timestamp of last disable
  status        VARCHAR2(1) NOT NULL, -- 'D' -> disabled
                                      -- 'E' -> enabled
                                      -- 'O' -> open
                                      -- 'I' -> internally disabled
  CONSTRAINT rt_p PRIMARY KEY (dbinc_key, thread#),
  CONSTRAINT rt_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT rt_c1_status CHECK (status in ('D','E','O','I'))
) &tablespace&
>>>
# the ORL table contains all redo logfiles and standby redo logfiles
# for all database incarnations.
define orl <<<
CREATE TABLE orl
(
  dbinc_key  NUMBER NOT NULL,           -- database incarnation
  thread#    NUMBER NOT NULL,           -- thread number
  group#     NUMBER NOT NULL,           -- group number
  fname      VARCHAR2(1024) NOT NULL,   -- redo log file name
  bytes      NUMBER DEFAULT NULL,       -- size of redolog
  type       VARCHAR2(7) DEFAULT 'ONLINE', -- ONLINE or STANDBY
  site_key   NUMBER,
  --CONSTRAINT orl_p PRIMARY KEY (dbinc_key, fname), -- key is too long
  CONSTRAINT orl_f1 FOREIGN KEY (dbinc_key, thread#)
    REFERENCES rt ON DELETE CASCADE,
  CONSTRAINT orl_f2 FOREIGN KEY (site_key)
    REFERENCES node ON DELETE CASCADE
) &tablespace&
>>>
define orl_i_1 <<<
CREATE INDEX orl_i_1 on orl(dbinc_key, thread#, group#) &tablespace&
>>>
# the RLH table records all redo log history for all threads.
# The redo log history entries in the control file are circularly reused,
# so it is important to resync often enough to propagate records to the
# recovery catalog before they are reused in the controlfile.
define rlh <<<
CREATE TABLE rlh
(
  rlh_key    NUMBER NOT NULL,
  dbinc_key  NUMBER NOT NULL, -- database incarnation
  rlh_recid  NUMBER NOT NULL, -- log history recid from control file
  rlh_stamp  NUMBER NOT NULL, -- log history stamp from control file
  thread#    NUMBER NOT NULL, -- thread number
  sequence#  NUMBER NOT NULL, -- log sequence number
  low_scn    NUMBER NOT NULL, -- scn generated when switching in
  low_time   DATE NOT NULL,
  next_scn   NUMBER NOT NULL, -- scn generated when switching out
  status     VARCHAR2(1),     -- 'C' -> cleared
  CONSTRAINT rlh_p PRIMARY KEY (rlh_key),
  CONSTRAINT rlh_u1 UNIQUE (dbinc_key, thread#, sequence#, low_scn),
  CONSTRAINT rlh_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT rlh_c_status CHECK (status in ('C'))
) &tablespace&
>>>
# The AL table contains archived logs.
# It corresponds to the V$ARCHIVED_LOG fixed view
# in the control file. The archived logs are uniquely identified by dbinc_key,
# recid and stamp.
# Note that it is possible to have an archived log without the corresponding
# log history record. This can happen if the log history record in the
# controlfile is overwritten, or after a resetlogs, which clears the log
# history. Therefore the constraint referencing rlh is commented out.
define al <<<
CREATE TABLE al
(
  al_key           NUMBER NOT NULL,
  dbinc_key        NUMBER NOT NULL,      -- database incarnation
  al_recid         NUMBER NOT NULL,      -- archive log recid from control file
  al_stamp         NUMBER NOT NULL,      -- archive log stamp from control file
  thread#          NUMBER NOT NULL,      -- thread number
  sequence#        NUMBER NOT NULL,      -- log sequence number
  low_scn          NUMBER NOT NULL,      -- scn generated when switching in
  low_time         DATE NOT NULL,        -- time low SCN allocated
  next_scn         NUMBER NOT NULL,      -- scn generated when switching out
  next_time        DATE,                 -- time when next SCN allocated
  fname            VARCHAR2(1024),       -- archive log file name,
                                         -- NULL -> log was cleared
  fname_hashkey    VARCHAR2(20),         -- hashed fname for indexing
  archived         VARCHAR2(1) NOT NULL, -- 'Y' -> archived log
                                         -- 'N' -> inspected online log
  blocks           NUMBER NOT NULL,      -- number of blocks written
  block_size       NUMBER NOT NULL,      -- size of a block in bytes
  completion_time  DATE NOT NULL,        -- time the log was archived or copied
  is_standby       VARCHAR2(1),          -- 'Y' if standby, 'N' if primary
  status           VARCHAR2(1) NOT NULL,
  dictionary_begin VARCHAR2(3),          -- log contains start of logminer dict
  dictionary_end   VARCHAR2(3),          -- log contains end of logminer dict
  is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                         -- is file a recovery destination one
  compressed       VARCHAR2(3) DEFAULT 'NO', -- compressed
  creator          VARCHAR2(7) DEFAULT NULL,
  terminal         VARCHAR2(3) DEFAULT 'NO', -- 'YES' for terminal rcv log
  site_key         NUMBER,               -- Null when log owner is unknown
  CONSTRAINT al_p PRIMARY KEY (al_key),
  CONSTRAINT al_u1 UNIQUE (dbinc_key, al_recid, al_stamp, is_standby),
  CONSTRAINT al_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT al_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE,
  CONSTRAINT al_c_archived CHECK (archived in ('Y','N')),
  CONSTRAINT al_c_status CHECK (status in ('A','U','D','X')),
  CONSTRAINT al_c_is_standby CHECK (is_standby in ('Y','N'))
) &tablespace&
>>>
define al_i_fname_status <<<
CREATE INDEX al_i_fname_status on al(fname_hashkey, status) &tablespace&
>>>
define al_i_2 <<<
CREATE INDEX al_i_2 on al(dbinc_key, thread#, sequence#, low_scn)
&tablespace&
>>>
# the BS table contains all backup sets for all database incarnations.
# The unique key on db_key, set_stamp and set_count ensures that a backup
# set can only be inserted once into the recovery catalog.
# A "place-holder" bs record is created when checkBackupPiece finds a kccbp
# record whose parent kccbs record has already aged out of the controlfile.
# In this case, the following columns will be null:
# bs_recid, bs_stamp, bck_type, incr_level, start_time, completion_time,
# status.
define bs <<<
CREATE TABLE bs
(
  bs_key           NUMBER NOT NULL, -- sequence generated primary key
  db_key           NUMBER NOT NULL, -- database
  bs_recid         NUMBER,          -- backup set recid from control file
  bs_stamp         NUMBER,          -- backup set stamp from control file
  set_stamp        NUMBER NOT NULL, -- set_stamp from control file
  set_count        NUMBER NOT NULL, -- set_count from control file
  bck_type         VARCHAR2(1),     -- 'D' -> full datafile
                                    -- 'I' -> incremental datafile
                                    -- 'L' -> archivelog
  incr_level       NUMBER,          -- incremental backup level (0 - 4)
                                    -- null if this is a FULL backupset or
                                    -- INCREMENTAL FROM SCN backup
  pieces           NUMBER NOT NULL, -- number of backup pieces in the set
  start_time       DATE,            -- time when this backup started
  completion_time  DATE,            -- time when this backup completed
  status           VARCHAR2(1),     -- 'A' -> complete set of pieces avail
                                    -- 'D' -> no pieces or all deleted
                                    -- 'O' -> neither of above
  controlfile_included             -- Indicates if this backup set has
                   VARCHAR2(7),    -- a controlfile in it
                                    -- 'NONE'    -> it does not
                                    -- 'BACKUP'  -> it includes a backup cf
                                    -- 'STANDBY' -> it includes a
                                    --              standby controlfile
  input_file_scan_only VARCHAR2(3), -- 'NO'  -> this is a real backup
                                    -- 'YES' -> this was 'backup validate'
  keep_options     NUMBER DEFAULT 0 NOT NULL,-- if backup is done with keep option
                                    -- then keep_options can be:
                                    --   KEEP_LOGS
                                    --   KEEP_NOLOGS
                                    --   KEEP_CONSIST
  keep_until       DATE,            -- valid only if keep_options != 0
                                    -- NULL means forever
  block_size       NUMBER DEFAULT NULL, -- block size for backup set
                                    -- Will be NULL when < 10gR2 rman exec
                                    -- creates.
  site_key         NUMBER,          -- Null when set owner is unknown
                                    -- or on more than one site
  multi_section    VARCHAR2(1),     -- 'Y' if backup is multi-section, else
                                    -- null
  CONSTRAINT bs_p PRIMARY KEY (bs_key),
  CONSTRAINT bs_u2 UNIQUE (db_key, set_stamp, set_count),
  CONSTRAINT bs_f1 FOREIGN KEY(db_key)  -- backup set belongs to a db
    REFERENCES db ON DELETE CASCADE,
  CONSTRAINT bs_f2 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT bs_c_bck_type CHECK (bck_type in ('D','I','L')),
  CONSTRAINT bs_c_incr_level CHECK (incr_level in (0,1,2,3,4)),
  -- note: incr_level can be null. CHECK constraints allow this.
  CONSTRAINT bs_c_controlfile_included
    CHECK (controlfile_included in ('NONE','BACKUP','STANDBY'))
) &tablespace&
>>>
define bs_i_1 <<<
CREATE INDEX bs_i_1 on bs(db_key, bs_recid, bs_stamp) &tablespace&
>>>
# the BP table contains all backup pieces of backup sets.
# The unique key on bs_key, bp_recid and bp_stamp ensures that the same piece
# can't be entered twice in the recovery catalog. Note that there may be
# multiple copies of the same piece, but each copy will have its own row
# in the BP table (and its own backup piece record in the control file).
# Do not create a unique key constraint on bs_key and piece#.
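# Editorial illustration (hypothetical query, not used by RMAN itself):
# because duplexed copies of a piece share piece# but carry distinct copy#
# values, the restorability of a set is governed by the number of distinct
# pieces that are still available, e.g.
#
#   select count(distinct piece#)
#     from bp
#    where bs_key = :bs_key
#      and status = 'A';
#
# A backup set is complete when this count equals the pieces column of the
# corresponding bs row.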
define bp <<< CREATE TABLE bp ( bp_key NUMBER NOT NULL, -- sequence generated primary key bs_key NUMBER NOT NULL, -- backup set db_key NUMBER NOT NULL, bp_recid NUMBER NOT NULL, -- backup piece recid from control file bp_stamp NUMBER NOT NULL, -- backup piece stamp from control file piece# NUMBER NOT NULL, -- first piece is 1 copy# NUMBER NOT NULL, -- copy number, 1 if no copies tag VARCHAR2(32), -- user specified tag device_type VARCHAR2(255) NOT NULL, -- 'DISK' -> on disk rather than seq handle VARCHAR2(1024) NOT NULL, --backup piece handle handle_hashkey VARCHAR2(30) NOT NULL, -- indexed hashkey on handle comments VARCHAR2(255), -- media VARCHAR2(80), -- media handle media_pool NUMBER, -- media pool concur VARCHAR2(1) NOT NULL, -- 'Y' media supports concurrent access start_time DATE NOT NULL, -- time when this piece started completion_time DATE NOT NULL, -- time when this piece completed -- updatable columns status VARCHAR2(1) NOT NULL, bytes NUMBER DEFAULT NULL, is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL, -- is file a recovery destination one rsr_key NUMBER, -- key of the row in the rsr table compressed VARCHAR2(3) DEFAULT 'NO',-- compressed site_key NUMBER, -- Null when piece owner is unknown encrypted VARCHAR2(1) DEFAULT 'N',-- 'Y' means encrypted, otherwise not backed_by_osb VARCHAR2(1) DEFAULT 'N',-- 'Y' means backed by OSB CONSTRAINT bp_p PRIMARY KEY (bp_key), CONSTRAINT bp_u1 UNIQUE (bs_key, bp_recid, bp_stamp), CONSTRAINT bp_f1 FOREIGN KEY (bs_key) REFERENCES bs ON DELETE CASCADE, CONSTRAINT bp_f2 FOREIGN KEY (site_key) REFERENCES node, CONSTRAINT bp_c_status CHECK (status in ('A','U','D','X')), CONSTRAINT bp_c_concur CHECK (concur in ('Y','N')) ) &tablespace& >>> define bp_i_device_handle_status <<< CREATE INDEX bp_i_device_handle_status on bp(handle_hashkey, status) &tablespace& >>> define bp_i_2 <<< CREATE INDEX bp_i_2 on bp(db_key, bp_recid, bp_stamp) &tablespace& >>> # The BCF table contains control file backups (in backup sets) # Note that a backup datafile record with file# 0 is used to represent the # backup control file in the V$BACKUP_DATAFILE view. define bcf <<< CREATE TABLE bcf ( bcf_key NUMBER NOT NULL, bs_key NUMBER NOT NULL, -- backup set dbinc_key NUMBER NOT NULL, -- database incarnation bcf_recid NUMBER NOT NULL, -- recid from control file bcf_stamp NUMBER NOT NULL, -- stamp from control file ckp_scn NUMBER NOT NULL, -- controlfile checkpoint SCN ckp_time DATE NOT NULL, -- controlfile checkpoint time create_time DATE NOT NULL, -- controlfile creation time min_offr_recid NUMBER NOT NULL, -- recid of the oldest offline range block_size NUMBER NOT NULL, -- blocksize controlfile_type VARCHAR2(1), -- 'B' -> backup controlfile -- 'S' -> standby controlfile blocks NUMBER, -- # blocks autobackup_date DATE, -- controlfile autobackup date autobackup_sequence NUMBER, -- controlfile autobackup sequence CONSTRAINT bcf_p PRIMARY KEY (bcf_key), CONSTRAINT bcf_u2 UNIQUE (bs_key), CONSTRAINT bcf_f1 FOREIGN KEY (bs_key) REFERENCES bs ON DELETE CASCADE, CONSTRAINT bcf_c_cf_type CHECK (controlfile_type in ('S','B')) ) &tablespace& >>> # The CCF table contains control file copies # Note that a datafile copy record with file# 0 is used to represent the # control file copy in the V$DATAFILE_COPY view. 
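# Editorial illustration (hypothetical query, not part of the catalog
# scripts): the newest available controlfile copy known for an incarnation
# can be located by ordering on the controlfile checkpoint SCN:
#
#   select fname, ckp_scn
#     from ccf
#    where dbinc_key = :dbinc_key
#      and status = 'A'
#    order by ckp_scn desc;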
define ccf <<<
CREATE TABLE ccf
(
  ccf_key          NUMBER NOT NULL,
  dbinc_key        NUMBER NOT NULL,        -- database incarnation
  ccf_recid        NUMBER NOT NULL,        -- recid from control file
  ccf_stamp        NUMBER NOT NULL,        -- stamp from control file
  fname            VARCHAR2(1024) NOT NULL,-- cf copy file name
  fname_hashkey    VARCHAR2(20) NOT NULL,  -- hashed fname for indexing
  tag              VARCHAR2(32),           -- cf copy tag
  ckp_scn          NUMBER NOT NULL,        -- controlfile checkpoint SCN
  ckp_time         DATE NOT NULL,          -- controlfile checkpoint time
  create_time      DATE NOT NULL,          -- controlfile creation time
  min_offr_recid   NUMBER NOT NULL,        -- recid of the oldest offline range
  blocks           NUMBER DEFAULT NULL,    -- number of blocks
  block_size       NUMBER NOT NULL,        -- blocksize
  completion_time  DATE NOT NULL,          -- time that the copy was taken
  controlfile_type VARCHAR2(1),            -- 'B' -> backup controlfile
                                           -- 'S' -> standby controlfile
  -- updatable columns
  status           VARCHAR2(1) NOT NULL,
  keep_options     NUMBER DEFAULT 0 NOT NULL,-- if backup is done with keep option
                                           -- then keep_options can be:
                                           --   KEEP_LOGS
                                           --   KEEP_NOLOGS
                                           --   KEEP_CONSIST
  keep_until       DATE,                   -- valid only if keep_options != 0
                                           -- if NULL means forever.
  is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                           -- is file a recovery destination one
  rsr_key          NUMBER,                 -- key of the row in the rsr table
  site_key         NUMBER,                 -- Null when cfcopy owner is unknown
  CONSTRAINT ccf_p PRIMARY KEY (ccf_key),
  CONSTRAINT ccf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT ccf_f2 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT ccf_u1 UNIQUE (dbinc_key, ccf_recid, ccf_stamp),
  CONSTRAINT ccf_c_status CHECK (status in ('A','U','D','X')),
  CONSTRAINT ccf_c_cf_type CHECK (controlfile_type in ('S','B'))
) &tablespace&
>>>
define ccf_i_fname_status <<<
CREATE INDEX ccf_i_fname_status on ccf(fname_hashkey, status) &tablespace&
>>>
# The XCF table contains control file proxy copies. Note that a datafile
# proxy copy record with file# 0 is used to represent the control file
# copy in the V$PROXY_DATAFILE view.
define xcf <<<
CREATE TABLE xcf
(
  xcf_key          NUMBER NOT NULL,
  dbinc_key        NUMBER NOT NULL,        -- database incarnation
  xcf_recid        NUMBER NOT NULL,        -- recid from control file
  xcf_stamp        NUMBER NOT NULL,        -- stamp from control file
  tag              VARCHAR2(32),           -- cf copy tag
  ckp_scn          NUMBER NOT NULL,        -- controlfile checkpoint SCN
  ckp_time         DATE NOT NULL,          -- controlfile checkpoint time
  create_time      DATE NOT NULL,          -- controlfile creation time
  min_offr_recid   NUMBER NOT NULL,        -- recid of the oldest offline range
  blocks           NUMBER DEFAULT NULL,    -- number of blocks in file
  block_size       NUMBER NOT NULL,        -- blocksize
  device_type      VARCHAR2(255) NOT NULL, -- 'DISK' -> on disk rather than seq
  handle           VARCHAR2(1024) NOT NULL,-- backup piece handle
  handle_hashkey   VARCHAR2(30) NOT NULL,  -- indexed hashkey on handle
  comments         VARCHAR2(255),          --
  media            VARCHAR2(80),           -- media handle
  media_pool       NUMBER,                 -- media pool
  start_time       DATE NOT NULL,          -- time when this piece started
  completion_time  DATE NOT NULL,          -- time when this piece completed
  controlfile_type VARCHAR2(1),            -- 'B' -> backup controlfile
                                           -- 'S' -> standby controlfile
  -- updatable columns
  status           VARCHAR2(1) NOT NULL,
  keep_options     NUMBER DEFAULT 0 NOT NULL,-- if backup is done with keep option
                                           -- then keep_options can be:
                                           --   KEEP_LOGS
                                           --   KEEP_NOLOGS
                                           --   KEEP_CONSIST
  keep_until       DATE,                   -- valid only if keep_options != 0
                                           -- if NULL means forever.
  rsr_key          NUMBER,                 -- key of the row in the rsr table
  site_key         NUMBER,                 -- Null when proxycopy owner is unknown
  CONSTRAINT xcf_p PRIMARY KEY (xcf_key),
  CONSTRAINT xcf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT xcf_f2 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT xcf_u1 UNIQUE (dbinc_key, xcf_recid, xcf_stamp),
  CONSTRAINT xcf_c_status CHECK (status in ('A','U','D','X')),
  CONSTRAINT xcf_c_cf_type CHECK (controlfile_type in ('S','B'))
) &tablespace&
>>>
define xcf_i_handle_status <<<
CREATE INDEX xcf_i_handle_status on xcf(handle_hashkey, status) &tablespace&
>>>
# The BSF table contains SPFILE backups (in backup sets).
# A backup SPFILE is uniquely identified by bs_key, bsf_recid and bsf_stamp.
# The table is filled with data from the V$BACKUP_SPFILE view.
define bsf <<<
CREATE TABLE bsf
(
  bsf_key           NUMBER NOT NULL,
  bs_key            NUMBER NOT NULL, -- backup set
  db_key            NUMBER NOT NULL, -- database to which this SPFILE
                                     -- belongs
  db_unique_name    VARCHAR2(30),    -- Instance site name
                                     -- NULL indicates unknown
  bsf_recid         NUMBER NOT NULL, -- recid from control file
  bsf_stamp         NUMBER NOT NULL, -- stamp from control file
  modification_time DATE NOT NULL,   -- SPFILE modification time
  bytes             NUMBER NOT NULL, -- size of SPFILE in bytes
  CONSTRAINT bsf_p PRIMARY KEY (bsf_key),
  CONSTRAINT bsf_u2 UNIQUE (bs_key),
  CONSTRAINT bsf_f1 FOREIGN KEY (bs_key) REFERENCES bs ON DELETE CASCADE
) &tablespace&
>>>
define bsf_i_bs_key <<<
CREATE INDEX bsf_i_bs_key on bsf(bs_key) &tablespace&
>>>
# The BDF table contains all datafile backups (in backup sets).
# A backup datafile is uniquely identified by bs_key, bdf_recid and bdf_stamp.
define bdf <<<
CREATE TABLE bdf
(
  bdf_key          NUMBER NOT NULL,     -- primary key
  dbinc_key        NUMBER NOT NULL,     -- database incarnation
  bdf_recid        NUMBER NOT NULL,     -- bdf recid from control file
  bdf_stamp        NUMBER NOT NULL,     -- bdf stamp from control file
  bs_key           NUMBER NOT NULL,     -- backup set, null if copy
  file#            NUMBER NOT NULL,     -- database file number
  create_scn       NUMBER NOT NULL,     -- creation SCN
  create_time      DATE DEFAULT NULL,   -- creation time
  incr_level       NUMBER,              -- incremental backup level (null,0-4)
  incr_scn         NUMBER NOT NULL,     -- backup contains changes since this scn
  ckp_scn          NUMBER NOT NULL,     -- scn of the last datafile ckpt
  ckp_time         DATE NOT NULL,       -- time of the last datafile ckpt
  abs_fuzzy_scn    NUMBER,              -- absolute fuzzy scn
  rcv_fuzzy_scn    NUMBER,              -- media recovery fuzzy scn
  rcv_fuzzy_time   DATE,                -- timestamp for media rcv fuzzy scn
  datafile_blocks  NUMBER NOT NULL,     -- number of blocks in datafile
  blocks           NUMBER NOT NULL,     -- number of blocks written to backup
  block_size       NUMBER NOT NULL,     -- blocksize
  completion_time  DATE,                -- completion time (null for 8.0.2)
  blocks_read      NUMBER NOT NULL,     -- number of blocks read for backup
  marked_corrupt   NUMBER DEFAULT NULL, -- corrupt blocks found reading
  used_chg_track   VARCHAR2(1) DEFAULT 'N', -- was change tracking file used
  used_optim       VARCHAR2(1) DEFAULT 'N', -- were bitmaps used
  foreign_dbid     NUMBER DEFAULT 0 NOT NULL,
  plugged_readonly VARCHAR2(3) DEFAULT 'NO' NOT NULL,
  plugin_scn       NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_scn NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_time DATE,
  section_size     NUMBER,
  CONSTRAINT bdf_p PRIMARY KEY (bdf_key),
  CONSTRAINT bdf_u2 UNIQUE (bs_key, file#),
  CONSTRAINT bdf_f1 FOREIGN KEY (bs_key) REFERENCES bs ON DELETE CASCADE
) &tablespace&
>>>
define bdf_i_bs_key <<<
CREATE INDEX bdf_i_bs_key on bdf(bs_key) &tablespace&
>>>
define bdf_i_df_key <<<
CREATE INDEX bdf_i_df_key on bdf(dbinc_key, file#, create_scn) &tablespace&
>>>
# the CDF table
# contains all datafile copies.
# A datafile copy is uniquely identified by the dbinc_key, recid and stamp
# columns.
define cdf <<<
CREATE TABLE cdf
(
  cdf_key          NUMBER NOT NULL,        -- primary key
  dbinc_key        NUMBER NOT NULL,        -- database incarnation
  cdf_recid        NUMBER NOT NULL,        -- df copy recid from control file
  cdf_stamp        NUMBER NOT NULL,        -- df copy stamp from control file
  file#            NUMBER NOT NULL,        -- database file number
  create_scn       NUMBER NOT NULL,        -- creation SCN
  create_time      DATE DEFAULT NULL,      -- timestamp when datafile was created
  fname            VARCHAR2(1024) NOT NULL,-- df copy file name
  fname_hashkey    VARCHAR2(20) NOT NULL,  -- hashed fname for indexing
  tag              VARCHAR2(32),           -- df copy tag
  incr_level       NUMBER,                 -- incremental backup level (null or 0)
  ckp_scn          NUMBER NOT NULL,        -- scn of the last datafile ckpt
  ckp_time         DATE NOT NULL,          -- time of the last datafile ckpt
  onl_fuzzy        VARCHAR2(1) NOT NULL,   -- 'Y' -> online fuzzy
  bck_fuzzy        VARCHAR2(1) NOT NULL,   -- 'Y' -> backup fuzzy
  abs_fuzzy_scn    NUMBER,                 -- absolute fuzzy scn, if known.
                                           -- null if not known.
  rcv_fuzzy_scn    NUMBER,                 -- media recovery fuzzy scn
  rcv_fuzzy_time   DATE,                   -- timestamp for media rcv fuzzy scn
  blocks           NUMBER NOT NULL,        -- number of blocks
  block_size       NUMBER NOT NULL,        -- blocksize
  completion_time  DATE NOT NULL,          -- time when this copy completed
  -- updatable columns
  status           VARCHAR2(1) NOT NULL,
  keep_options     NUMBER DEFAULT 0 NOT NULL, -- if backup is done with keep option
                                           -- then keep_options can be:
                                           --   KEEP_LOGS
                                           --   KEEP_NOLOGS
                                           --   KEEP_CONSIST
  keep_until       DATE,                   -- valid only if keep_options != 0
                                           -- if NULL means forever.
  scanned          VARCHAR2(1) DEFAULT 'N' NOT NULL,
                                           -- 'Y' -> file was completely scanned
  is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL,
                                           -- is file a recovery destination one
  rsr_key          NUMBER,                 -- key of the row in the rsr table
  marked_corrupt   NUMBER DEFAULT NULL,    -- corrupt blocks when reading
  site_key         NUMBER,                 -- Null when dfcopy owner is unknown
  foreign_dbid     NUMBER DEFAULT 0 NOT NULL,
  plugged_readonly VARCHAR2(3) DEFAULT 'NO' NOT NULL,
  plugin_scn       NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_scn NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_time DATE,
  CONSTRAINT cdf_p PRIMARY KEY (cdf_key),
  CONSTRAINT cdf_u1 UNIQUE (dbinc_key, cdf_recid, cdf_stamp),
  CONSTRAINT cdf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  -- this constraint can't be enforced since datafile copies are sometimes
  -- inserted before the datafiles.
  --CONSTRAINT cdf_f2 FOREIGN KEY (dbinc_key, file#, create_scn)
  --  REFERENCES df ON DELETE CASCADE,
  CONSTRAINT cdf_f3 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT cdf_c_status CHECK (status in ('A','U','D','X','F')),
  CONSTRAINT cdf_c_onl_fuzzy CHECK (onl_fuzzy in ('Y','N')),
  CONSTRAINT cdf_c_bck_fuzzy CHECK (bck_fuzzy in ('Y','N'))
) &tablespace&
>>>
define cdf_i_df_key <<<
CREATE INDEX cdf_i_df_key on cdf(dbinc_key, file#, create_scn) &tablespace&
>>>
define cdf_i_fname_status <<<
CREATE INDEX cdf_i_fname_status on cdf(fname_hashkey, status) &tablespace&
>>>
# the XDF table contains all proxy datafile backups.
# A proxy datafile backup is uniquely identified by dbinc_key, recid and stamp.
# xdf_key is also a unique primary key.

define xdf <<<
CREATE TABLE xdf
(
  xdf_key           NUMBER NOT NULL,         -- primary key
  dbinc_key         NUMBER NOT NULL,         -- database incarnation
  xdf_recid         NUMBER NOT NULL,         -- x$kccpd recid from control file
  xdf_stamp         NUMBER NOT NULL,         -- x$kccpd stamp from control file
  file#             NUMBER NOT NULL,         -- database file number
  create_scn        NUMBER NOT NULL,         -- creation SCN
  create_time       DATE DEFAULT NULL,       -- creation time
  tag               VARCHAR2(32),            -- df copy tag
  incr_level        NUMBER,                  -- incremental backup level
                                             -- (null or 0)
  ckp_scn           NUMBER NOT NULL,         -- scn of the last datafile ckpt
  ckp_time          DATE NOT NULL,           -- time of the last datafile ckpt
  onl_fuzzy         VARCHAR2(1) NOT NULL,    -- 'Y' -> online fuzzy
  bck_fuzzy         VARCHAR2(1) NOT NULL,    -- 'Y' -> backup fuzzy
  abs_fuzzy_scn     NUMBER,                  -- absolute fuzzy scn, if known.
                                             -- null if not known.
  rcv_fuzzy_scn     NUMBER,                  -- media recovery fuzzy scn
  rcv_fuzzy_time    DATE,                    -- timestamp for media rcv fuzzy scn
  blocks            NUMBER NOT NULL,         -- number of blocks
  block_size        NUMBER NOT NULL,         -- blocksize
  device_type       VARCHAR2(255) NOT NULL,  -- 'DISK' -> on disk rather than seq
  handle            VARCHAR2(1024) NOT NULL, -- backup piece handle
  handle_hashkey    VARCHAR2(30) NOT NULL,   -- indexed hashkey on handle
  comments          VARCHAR2(255),           --
  media             VARCHAR2(80),            -- media handle
  media_pool        NUMBER,                  -- media pool
  start_time        DATE NOT NULL,           -- time when this piece started
  completion_time   DATE NOT NULL,           -- time when this piece completed
  -- updatable columns
  status            VARCHAR2(1) NOT NULL,
  keep_options      NUMBER DEFAULT 0 NOT NULL, -- if backup is done with keep
                                             -- option then keep_options can be:
                                             --   KEEP_LOGS
                                             --   KEEP_NOLOGS
                                             --   KEEP_CONSIST
  keep_until        DATE,                    -- valid only if keep_options != 0;
                                             -- NULL means forever.
  rsr_key           NUMBER,                  -- key of the row in the rsr table
  site_key          NUMBER,                  -- NULL when proxy copy owner is
                                             -- unknown
  foreign_dbid      NUMBER DEFAULT 0 NOT NULL,
  plugged_readonly  VARCHAR2(3) DEFAULT 'NO' NOT NULL,
  plugin_scn        NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_scn  NUMBER DEFAULT 0 NOT NULL,
  plugin_reset_time DATE,
  CONSTRAINT xdf_p PRIMARY KEY (xdf_key),
  CONSTRAINT xdf_u1 UNIQUE (dbinc_key, xdf_recid, xdf_stamp),
  CONSTRAINT xdf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  -- this constraint can't be enforced since proxy copies might be resynced
  -- before their parent datafiles
  --CONSTRAINT xdf_f2 FOREIGN KEY (dbinc_key, file#, create_scn)
  --           REFERENCES df ON DELETE CASCADE,
  CONSTRAINT xdf_f3 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT xdf_c_status CHECK (status in ('A','U','D','X')),
  CONSTRAINT xdf_c_onl_fuzzy CHECK (onl_fuzzy in ('Y','N')),
  CONSTRAINT xdf_c_bck_fuzzy CHECK (bck_fuzzy in ('Y','N'))
) &tablespace&
>>>

define xdf_i_df_key <<<
CREATE INDEX xdf_i_df_key on xdf(dbinc_key, file#, create_scn) &tablespace&
>>>

define xdf_i_handle_status <<<
CREATE INDEX xdf_i_handle_status on xdf(handle_hashkey, status) &tablespace&
>>>

# The XAL table contains all proxy archivelog backups.
# A proxy archivelog backup is uniquely identified by dbinc_key, recid and
# stamp.
# xal_key is also a unique primary key.

define xal <<<
CREATE TABLE xal
(
  xal_key         NUMBER NOT NULL,         -- primary key
  dbinc_key       NUMBER NOT NULL,         -- database incarnation
  xal_recid       NUMBER NOT NULL,         -- x$kccpa recid from control file
  xal_stamp       NUMBER NOT NULL,         -- x$kccpa stamp from control file
  tag             VARCHAR2(32),            -- al copy tag
  thread#         NUMBER NOT NULL,         -- thread number
  sequence#       NUMBER NOT NULL,         -- log sequence number
  low_scn         NUMBER NOT NULL,         -- scn generated when switching in
  low_time        DATE NOT NULL,           -- time low SCN allocated
  next_scn        NUMBER NOT NULL,         -- scn generated when switching out
  next_time       DATE NOT NULL,           -- time when next SCN allocated
  blocks          NUMBER NOT NULL,         -- number of blocks written to backup
  block_size      NUMBER NOT NULL,         -- size of a block in bytes
  device_type     VARCHAR2(255) NOT NULL,  -- 'DISK' -> on disk rather than seq
  handle          VARCHAR2(1024) NOT NULL, -- backup piece handle
  handle_hashkey  VARCHAR2(30) NOT NULL,   -- indexed hashkey on handle
  comments        VARCHAR2(255),           --
  media           VARCHAR2(80),            -- media handle
  media_pool      NUMBER,                  -- media pool
  start_time      DATE NOT NULL,           -- time when this piece started
  completion_time DATE NOT NULL,           -- time when this piece completed
  rsr_key         NUMBER,                  -- key of the row in the rsr table
  terminal        VARCHAR2(3) DEFAULT 'NO',-- 'YES' for terminal rcv log
  keep_options    NUMBER DEFAULT 0 NOT NULL, -- if backup is done with keep
                                           -- option then keep_options can be:
                                           --   KEEP_LOGS
                                           --   KEEP_NOLOGS
                                           --   KEEP_CONSIST
  keep_until      DATE,                    -- valid only if keep_options != 0;
                                           -- NULL means forever.
  -- updatable columns
  status          VARCHAR2(1) NOT NULL,
  site_key        NUMBER,                  -- NULL when proxy copy owner is
                                           -- unknown
  CONSTRAINT xal_p PRIMARY KEY (xal_key),
  CONSTRAINT xal_u1 UNIQUE (dbinc_key, xal_recid, xal_stamp),
  CONSTRAINT xal_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE,
  CONSTRAINT xal_f2 FOREIGN KEY (site_key) REFERENCES node,
  CONSTRAINT xal_c_status CHECK (status in ('A','U','D','X'))
) &tablespace&
>>>

define xal_i_al_key <<<
CREATE INDEX xal_i_al_key on xal(dbinc_key, thread#, sequence#) &tablespace&
>>>

define xal_i_handle_status <<<
CREATE INDEX xal_i_handle_status on xal(handle_hashkey, status) &tablespace&
>>>

# The BRL table contains backup redo logs (in backup sets).
# It corresponds to the V$BACKUP_REDOLOG fixed view in the control file.
# The redo log backups are uniquely identified by bs_key, brl_recid and
# brl_stamp.
define brl <<<
CREATE TABLE brl
(
  brl_key    NUMBER NOT NULL,
  dbinc_key  NUMBER NOT NULL,          -- database incarnation
  brl_recid  NUMBER NOT NULL,          -- recid from control file
  brl_stamp  NUMBER NOT NULL,          -- stamp from control file
  bs_key     NUMBER NOT NULL,          -- backup set key
  thread#    NUMBER NOT NULL,          -- thread number
  sequence#  NUMBER NOT NULL,          -- log sequence number
  low_scn    NUMBER NOT NULL,          -- scn generated when switching in
  low_time   DATE NOT NULL,            -- time low SCN allocated
  next_scn   NUMBER NOT NULL,          -- scn generated when switching out
  next_time  DATE NOT NULL,            -- time when next SCN allocated
  blocks     NUMBER NOT NULL,          -- number of blocks written to backup
  block_size NUMBER NOT NULL,          -- size of a block in bytes
  terminal   VARCHAR2(3) DEFAULT 'NO', -- 'YES' for terminal rcv log
  CONSTRAINT brl_p PRIMARY KEY (brl_key),
  CONSTRAINT brl_u2 UNIQUE (bs_key, thread#, sequence#),
  CONSTRAINT brl_f1 FOREIGN KEY (bs_key) REFERENCES bs ON DELETE CASCADE
) &tablespace&
>>>

define brl_i_bs_key <<<
CREATE INDEX brl_i_bs_key on brl(bs_key) &tablespace&
>>>

define brl_i_dts <<<
CREATE INDEX brl_i_dts on brl(dbinc_key, thread#, sequence#) &tablespace&
>>>

# The BCB table contains corrupt block ranges in datafile backups.
# It corresponds to the V$BACKUP_CORRUPTION fixed view in the control file.

define bcb <<<
CREATE TABLE bcb
(
  bdf_key         NUMBER NOT NULL,      -- datafile backup or copy
  bcb_recid       NUMBER NOT NULL,      -- recid from control file
  bcb_stamp       NUMBER NOT NULL,      -- stamp from control file
  piece#          NUMBER NOT NULL,      -- backup piece to which block belongs
  block#          NUMBER NOT NULL,      -- starting block number in the file
  blocks          NUMBER NOT NULL,      -- block count in corrupt range
  corrupt_scn     NUMBER,               -- scn at which corruption was detected
  marked_corrupt  VARCHAR2(1) NOT NULL, -- 'Y' -> could not read from disk
  corruption_type VARCHAR2(9),
  CONSTRAINT bcb_u1 UNIQUE (bdf_key, bcb_recid, bcb_stamp),
  CONSTRAINT bcb_f1 FOREIGN KEY (bdf_key) REFERENCES bdf ON DELETE CASCADE
) &tablespace&
>>>

# The CCB table contains corrupt block ranges in datafile copies.
# It corresponds to the V$COPY_CORRUPTION fixed view in the control file.

define ccb <<<
CREATE TABLE ccb
(
  cdf_key         NUMBER NOT NULL,      -- datafile copy
  ccb_recid       NUMBER NOT NULL,      -- recid from control file
  ccb_stamp       NUMBER NOT NULL,      -- stamp from control file
  block#          NUMBER NOT NULL,      -- block number in the file
  blocks          NUMBER NOT NULL,      -- block count in corrupt range
  corrupt_scn     NUMBER,               -- scn at which corruption was detected
  marked_corrupt  VARCHAR2(1) NOT NULL, -- 'Y' -> could not read from disk
  corruption_type VARCHAR2(9),
  CONSTRAINT ccb_u1 UNIQUE (cdf_key, ccb_recid, ccb_stamp),
  CONSTRAINT ccb_f1 FOREIGN KEY (cdf_key) REFERENCES cdf ON DELETE CASCADE
) &tablespace&
>>>
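#
# Illustrative query (a sketch, assuming a catalog-owner session): show every
# corrupt block range recorded for datafile backups, together with the file
# number of the affected datafile.
#
#   select bdf.file#, bcb.piece#, bcb.block#, bcb.blocks, bcb.corruption_type
#     from bdf, bcb
#    where bcb.bdf_key = bdf.bdf_key
#    order by bdf.file#, bcb.block#;
#

# The RSR table stores the history of RMAN backups.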
define rsr <<< CREATE TABLE rsr ( rsr_key NUMBER NOT NULL, dbinc_key NUMBER NOT NULL, -- database incarnation rsr_recid NUMBER NOT NULL, -- recid from control file rsr_stamp NUMBER NOT NULL, -- stamp from control file rsr_pkey NUMBER, -- key of the parent, NULL if no rsr_l0key NUMBER, -- key of the level 0, NULL if no rsr_level NUMBER, -- level rsr_type VARCHAR2(33), -- row type rsr_oper VARCHAR2(33), -- operation rsr_cmdid VARCHAR2(33), -- command id rsr_status VARCHAR2(33), -- status rsr_mbytes NUMBER NOT NULL, -- megabytes rsr_start DATE NOT NULL, -- start time rsr_end DATE, -- end time rsr_ibytes NUMBER, -- input megabytes processed rsr_obytes NUMBER, -- output megabytes produced rsr_optimized VARCHAR2(3), -- was optimization applied rsr_otype VARCHAR2(80), -- input object types involved in oper rsr_srecid NUMBER, -- session recid rsr_sstamp NUMBER, -- session stamp rsr_odevtype VARCHAR2(17), -- output device type site_key NUMBER, -- Null when rman job owner is unknown rsr_osb_allocated VARCHAR2(1), -- 'Y' when OSB is allocated CONSTRAINT rsr_key PRIMARY KEY(rsr_key), CONSTRAINT rsr_u2 UNIQUE (dbinc_key, rsr_recid, rsr_stamp, site_key), CONSTRAINT rsr_f1 FOREIGN KEY(dbinc_key) REFERENCES dbinc ON DELETE CASCADE, CONSTRAINT rsr_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE ) &tablespace& >>> # The SCR table contains 1 row for each stored script. define scr <<< CREATE TABLE scr ( scr_key NUMBER NOT NULL, -- sequence generated primary key db_key NUMBER, -- database that owns this script, -- NULL indicates it is a global script scr_name VARCHAR2(100) NOT NULL, -- script name scr_comment VARCHAR2(255), -- comment for script CONSTRAINT scr_p PRIMARY KEY(scr_key), CONSTRAINT scr_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE, CONSTRAINT scr_u1 UNIQUE(db_key, scr_name) ) &tablespace& >>> define scr_trigger <<< create or replace trigger scr_trigger before insert or update or delete on scr for each row declare global_script boolean; begin if inserting then global_script := :new.db_key is null; elsif updating then global_script := :old.db_key is null or :new.db_key is null; elsif deleting then global_script := :old.db_key is null; end if; if not global_script or dbms_catowner = user then return; end if; raise_application_error(num=>-20016, msg=>'virtual private catalog user cannot modify global scripts'); end; >>> # The SCRL table contains 1 row for each line of each stored script. 
define scrl <<<
CREATE TABLE scrl
(
  scr_key NUMBER NOT NULL,         -- script that owns this line
  db_key  NUMBER,                  -- database that owns this script
  linenum NUMBER NOT NULL,         -- line number
  text    VARCHAR2(1024) NOT NULL, -- text of the line
  CONSTRAINT scrl_u1 UNIQUE(scr_key, linenum),
  CONSTRAINT scrl_f1 FOREIGN KEY(scr_key) REFERENCES scr ON DELETE CASCADE
) &tablespace&
>>>

define scrl_trigger <<<
create or replace trigger scrl_trigger
before insert or update or delete on scrl
for each row
declare
  dbkey number;
begin
  if inserting then
    select db_key into dbkey from scr where scr_key = :new.scr_key;
    :new.db_key := dbkey;
  end if;
  if dbms_catowner = user then
    return;
  end if;
  if updating then
    dbkey := :new.db_key;
    if :old.db_key <> :new.db_key then
      raise_application_error(num=>-20017,
                              msg=>'illegal script update operation');
    end if;
  elsif deleting then
    dbkey := :old.db_key;
  end if;
  if dbkey is not null then
    return;
  end if;
  raise_application_error(num=>-20016,
      msg=>'virtual private catalog user cannot modify global scripts');
end;
>>>

define rout <<<
CREATE TABLE rout
(
  db_key     NUMBER NOT NULL,        -- database output belongs to
  rsr_key    NUMBER NOT NULL,        -- command that generated the output
  rout_skey  NUMBER NOT NULL,        -- session that created the output
  rout_recid NUMBER NOT NULL,        -- record id from server
  rout_stamp NUMBER NOT NULL,        -- timestamp when row was added
  rout_text  VARCHAR2(130) NOT NULL, -- RMAN output
  CONSTRAINT rout_u1 UNIQUE(db_key, rout_skey, rsr_key, rout_recid,
                            rout_stamp),
  CONSTRAINT rout_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE,
  CONSTRAINT rout_f2 FOREIGN KEY(rsr_key) REFERENCES rsr ON DELETE CASCADE
) &tablespace&
>>>

#
# Present so that 'configure compatible' commands executed by the 8.1.6 RMAN
# client do not throw SQL errors.
#
define config <<<
CREATE TABLE config
(
  name  VARCHAR2(30) NOT NULL, -- name of configuration option
  value VARCHAR2(100)          -- value of configuration option
) &tablespace&
>>>

define fb <<<
CREATE TABLE fb
(
  dbinc_key             NUMBER NOT NULL,
  db_unique_name        VARCHAR2(512) NOT NULL, -- instance site name
  oldest_flashback_scn  NUMBER,                 -- guaranteed flashback scn
  oldest_flashback_time DATE DEFAULT NULL,      -- flashback target time
  CONSTRAINT fb_u1 UNIQUE(dbinc_key, db_unique_name),
  CONSTRAINT fb_f1 FOREIGN KEY(dbinc_key) REFERENCES dbinc ON DELETE CASCADE
) &tablespace&
>>>

define grsp <<<
CREATE TABLE grsp
(
  dbinc_key     NUMBER NOT NULL,
  site_key      NUMBER,
  rspname       VARCHAR2(128) NOT NULL,
  creation_time DATE DEFAULT NULL,
  rsptime       DATE DEFAULT NULL,
  from_scn      NUMBER,
  to_scn        NUMBER,
  guaranteed    VARCHAR2(3) DEFAULT 'YES',
  CONSTRAINT grsp_u1 UNIQUE(site_key, rspname),
  CONSTRAINT grsp_u2 FOREIGN KEY(dbinc_key) REFERENCES dbinc
             ON DELETE CASCADE,
  CONSTRAINT grsp_u3 FOREIGN KEY (site_key) REFERENCES node
             ON DELETE CASCADE
) &tablespace&
>>>

define nrsp <<<
CREATE TABLE nrsp
(
  nrsp_recid    NUMBER NOT NULL,
  nrsp_stamp    NUMBER NOT NULL,
  dbinc_key     NUMBER NOT NULL,
  site_key      NUMBER,
  rspname       VARCHAR2(128) NOT NULL,
  creation_time DATE DEFAULT NULL,
  rsptime       DATE DEFAULT NULL,
  to_scn        NUMBER,
  long_term     VARCHAR2(3),
  CONSTRAINT nrsp_u1 UNIQUE (dbinc_key, nrsp_recid, nrsp_stamp, site_key),
  CONSTRAINT nrsp_f1 FOREIGN KEY(dbinc_key) REFERENCES dbinc
             ON DELETE CASCADE,
  CONSTRAINT nrsp_f2 FOREIGN KEY (site_key) REFERENCES node
             ON DELETE CASCADE
) &tablespace&
>>>
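#
# Illustrative query (a sketch, assuming a catalog-owner session): list all
# restore points recorded by resync, tagging each row with the table it came
# from.
#
#   select rspname, to_scn, 'GUARANTEED' origin from grsp
#   union all
#   select rspname, to_scn, 'NORMAL' from nrsp;
#

# The BCR table contains corrupt block ranges in datafiles.
# It corresponds to the V$DATABASE_BLOCK_CORRUPTION fixed view in the
# control file.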
define bcr <<<
CREATE TABLE bcr
(
  bcr_recid       NUMBER NOT NULL, -- recid from control file
  bcr_stamp       NUMBER NOT NULL, -- stamp from control file
  df_key          NUMBER NOT NULL, -- df_key in df table
  site_key        NUMBER NOT NULL, -- which db_unique_name it contains
  block#          NUMBER NOT NULL, -- block number in the file
  blocks          NUMBER NOT NULL, -- block count in corrupt range
  corrupt_scn     NUMBER,          -- scn at which corruption was detected
  corruption_type VARCHAR2(9),
  CONSTRAINT bcr_p PRIMARY KEY (bcr_recid, bcr_stamp, site_key),
  CONSTRAINT bcr_f1 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE,
  CONSTRAINT bcr_f2 FOREIGN KEY (df_key, site_key) REFERENCES site_dfatt
             ON DELETE CASCADE,
  CONSTRAINT bcr_c_blocks CHECK (blocks != 0)
) &tablespace&
>>>

define vpc_users <<<
CREATE TABLE vpc_users
(
  filter_user VARCHAR2(128) NOT NULL, -- name of user who has access
  filter_uid  NUMBER constraint vpc_users_uid_nn NOT NULL, -- uid of above
  add_new_db  CHAR(1),                -- Y -> user can add new DBs to catalog
  version     VARCHAR2(12),           -- version this user is sync'ed to
  CONSTRAINT vpc_users_p PRIMARY KEY(filter_user)
) &tablespace&
>>>

define vpc_databases <<<
CREATE TABLE vpc_databases
(
  filter_user VARCHAR2(128) NOT NULL, -- name of user who has access
  filter_uid  NUMBER constraint vpc_databases_uid_nn NOT NULL, -- uid of above
  db_id       NUMBER NOT NULL, -- database to which this user has access
  CONSTRAINT vpc_databases_f1 FOREIGN KEY(filter_user) REFERENCES vpc_users,
  CONSTRAINT vpc_databases_u1 UNIQUE(filter_user, db_id)
) &tablespace&
>>>

define upgcat_vpc_users_add_uid <<<
alter table vpc_users add (filter_uid number)
>>>

define upgcat_vpc_users_populate_uid <<<
-- After adding the filter_uid column to vpc_users we must fill in
-- the correct uid for each user in the table. Users that exist in
-- vpc_users but don't exist in all_users will have the uid column
-- set to null.
begin
  update vpc_users set filter_uid =
    (select user_id from all_users where username = filter_user)
  where filter_uid is null;
  commit;
end;
>>>

define upgcat_vpc_databases_add_uid <<<
alter table vpc_databases add (filter_uid number)
>>>

define upgcat_vpc_databases_populate_uid <<<
-- After adding the filter_uid column to vpc_databases we must fill in
-- the correct uid for each user in the table. Users that exist in
-- vpc_databases but don't have a valid id in vpc_users will be deleted.
begin
  update vpc_databases set filter_uid =
    (select filter_uid from vpc_users
     where vpc_databases.filter_user = vpc_users.filter_user)
  where filter_uid is null;
  delete from vpc_databases where filter_uid is null;
  commit;
end;
>>>

define upgcat_vpc_users_delete_null_uids <<<
begin
  delete from vpc_users where filter_uid is null;
  commit;
end;
>>>

define upgcat_vpc_users_uid_not_null <<<
alter table vpc_users modify(filter_uid constraint vpc_users_uid_nn not null)
>>>

define upgcat_vpc_databases_uid_not_null <<<
alter table vpc_databases modify(filter_uid constraint vpc_databases_uid_nn
                                 not null)
>>>

define cfs <<<
CREATE TABLE cfs
(
  stmt_number NUMBER NOT NULL, -- statement number
  stmt_type   CHAR(1),         -- statement type (C=create, D=drop)
  stmt_sql    LONG             -- DDL statement to execute
) &tablespace&
>>>

#
# Filter views and grants. There must be a view and grant here for every
# base table defined above. Note that there is no harm in granting the
# filter views to public because they will be empty to all users except
# those that have been granted access to specific databases.
#
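#
# Illustrative query (a sketch, assuming a session connected as the catalog
# owner): the databases visible to a given virtual private catalog user are
# exactly the vpc_databases rows whose filter_uid matches that user's UID;
# every filter view below repeats this uid-based predicate.
#
#   select filter_user, db_id
#     from vpc_databases
#    where filter_uid = uid;
#

# Unlike the other filter views, db_v does not use 'with check option'.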
# This is done to allow those users who are authorized to add rows # for new databases into the db table. define db_v <<< create or replace view db_v as select * from db where db_id in (select db_id from vpc_databases where filter_uid = uid) >>> define dbinc_v <<< create or replace view dbinc_v as select * from dbinc where db_key in (select db_key from db_v) with check option >>> define al_v <<< create or replace view al_v as select * from al where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define bcf_v <<< create or replace view bcf_v as select * from bcf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define bdf_v <<< create or replace view bdf_v as select * from bdf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define brl_v <<< create or replace view brl_v as select * from brl where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define ccf_v <<< create or replace view ccf_v as select * from ccf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define cdf_v <<< create or replace view cdf_v as select * from cdf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define ckp_v <<< create or replace view ckp_v as select * from ckp where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define df_v <<< create or replace view df_v as select * from df where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define site_dfatt_v <<< create or replace view site_dfatt_v as select * from site_dfatt where site_key in (select site_key from node_v) with check option >>> define fb_v <<< create or replace view fb_v as select * from fb where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define offr_v <<< create or replace view offr_v as select * from offr where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define orl_v <<< create or replace view orl_v as select * from orl where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define rlh_v <<< create or replace view rlh_v as select * from rlh where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define rr_v <<< create or replace view rr_v as select * from rr where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define rsr_v <<< create or replace view rsr_v as select * from rsr where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define rt_v <<< create or replace view rt_v as select * from rt where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define site_tfatt_v <<< create or replace view site_tfatt_v as select * from site_tfatt where site_key in (select site_key from node_v) with check option >>> define tf_v <<< create or replace view tf_v as select * from tf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define tsatt_v <<< create or replace view tsatt_v as select * from tsatt where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define ts_v <<< create or replace view ts_v as select * from ts where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define xal_v <<< create or replace view xal_v as select * from xal where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define xcf_v <<< create or replace view xcf_v as select * from xcf where dbinc_key in (select dbinc_key from dbinc_v) with check option >>> define xdf_v <<< create or replace view xdf_v as select * from xdf 
where dbinc_key in (select dbinc_key from dbinc_v) with check option
>>>

define grsp_v <<<
create or replace view grsp_v as select * from grsp
  where dbinc_key in (select dbinc_key from dbinc_v) with check option
>>>

define nrsp_v <<<
create or replace view nrsp_v as select * from nrsp
  where dbinc_key in (select dbinc_key from dbinc_v) with check option
>>>

#########################################################
# Add views here for new tables whose key is dbinc_key. #
#########################################################

define bp_v <<<
create or replace view bp_v as select * from bp
  where db_key in (select db_key from db_v) with check option
>>>

define bsf_v <<<
create or replace view bsf_v as select * from bsf
  where db_key in (select db_key from db_v) with check option
>>>

define bs_v <<<
create or replace view bs_v as select * from bs
  where db_key in (select db_key from db_v) with check option
>>>

define conf_v <<<
create or replace view conf_v as select * from conf
  where db_key in (select db_key from db_v) with check option
>>>

define node_v <<<
create or replace view node_v as select * from node
  where db_key in (select db_key from db_v) with check option
>>>

define rout_v <<<
create or replace view rout_v as select * from rout
  where db_key in (select db_key from db_v) with check option
>>>

######################################################
# Add views here for new tables whose key is db_key. #
######################################################

##############################################################################
# Special views that fit neither the dbinc_key nor db_key category go below. #
##############################################################################

define scr_v <<<
create or replace view scr_v as select * from scr
  where db_key in (select db_key from db_v) or db_key is null
  with check option
>>>

define bcb_v <<<
create or replace view bcb_v as select * from bcb
  where bdf_key in (select bdf_key from bdf_v) with check option
>>>

define ccb_v <<<
create or replace view ccb_v as select * from ccb
  where cdf_key in (select cdf_key from cdf_v) with check option
>>>

define scrl_v <<<
create or replace view scrl_v as select * from scrl
  where scr_key in (select scr_key from scr_v) with check option
>>>

define config_v <<<
create or replace view config_v as select * from config
  where exists (select 1 from vpc_users where filter_uid = uid)
  with check option
>>>

define rcver_v <<<
create or replace view rcver_v as select * from rcver
  where exists (select 1 from vpc_users where filter_uid = uid)
  with check option
>>>

define vpc_users_v <<<
create or replace view vpc_users_v as select version from vpc_users
  where filter_uid = uid with check option
>>>

define vpc_databases_v <<<
create or replace view vpc_databases_v as select * from vpc_databases
  where filter_uid = uid
>>>

define cfs_v <<<
create or replace view cfs_v as select * from cfs
  where exists (select 1 from vpc_users where filter_uid = uid)
  with check option
>>>

define bcr_v <<<
create or replace view bcr_v as select * from bcr
  where df_key in (select df_key from df_v) with check option
>>>

define dbms_catowner <<<
create or replace function dbms_catowner return varchar2 is
  u varchar2(128);
begin
  select username into u from user_users;
  return u;
end;
>>>

define grant_al_v <<< grant select,insert,update,delete on al_v to public >>>
define grant_bcb_v <<< grant select,insert,update,delete on bcb_v to public >>>
define grant_bcf_v <<< grant select,insert,update,delete on bcf_v to public >>>
define
grant_bdf_v <<< grant select,insert,update,delete on bdf_v to public >>> define grant_bp_v <<< grant select,insert,update,delete on bp_v to public >>> define grant_brl_v <<< grant select,insert,update,delete on brl_v to public >>> define grant_bs_v <<< grant select,insert,update,delete on bs_v to public >>> define grant_bsf_v <<< grant select,insert,update,delete on bsf_v to public >>> define grant_ccb_v <<< grant select,insert,update,delete on ccb_v to public >>> define grant_ccf_v <<< grant select,insert,update,delete on ccf_v to public >>> define grant_cdf_v <<< grant select,insert,update,delete on cdf_v to public >>> define grant_ckp_v <<< grant select,insert,update,delete on ckp_v to public >>> define grant_conf_v <<< grant select,insert,update,delete on conf_v to public >>> define grant_db_v <<< grant select,insert,update,delete on db_v to public >>> define grant_dbinc_v <<< grant select,insert,update,delete on dbinc_v to public >>> define grant_df_v <<< grant select,insert,update,delete on df_v to public >>> define grant_site_dfatt_v <<< grant select,insert,update,delete on site_dfatt_v to public >>> define grant_fb_v <<< grant select,insert,update,delete on fb_v to public >>> define grant_grsp_v <<< grant select,insert,update,delete on grsp_v to public >>> define grant_nrsp_v <<< grant select,insert,update,delete on nrsp_v to public >>> define grant_node_v <<< grant select,insert,update,delete on node_v to public >>> define grant_offr_v <<< grant select,insert,update,delete on offr_v to public >>> define grant_orl_v <<< grant select,insert,update,delete on orl_v to public >>> define grant_rlh_v <<< grant select,insert,update,delete on rlh_v to public >>> define grant_rout_v <<< grant select,insert,update,delete on rout_v to public >>> define grant_rr_v <<< grant select,insert,update,delete on rr_v to public >>> define grant_rsr_v <<< grant select,insert,update,delete on rsr_v to public >>> define grant_rt_v <<< grant select,insert,update,delete on rt_v to public >>> define grant_scr_v <<< grant select,insert,update,delete on scr_v to public >>> define grant_scrl_v <<< grant select,insert,update,delete on scrl_v to public >>> define grant_tf_v <<< grant select,insert,update,delete on tf_v to public >>> define grant_site_tfatt_v <<< grant select,insert,update,delete on site_tfatt_v to public >>> define grant_ts_v <<< grant select,insert,update,delete on ts_v to public >>> define grant_tsatt_v <<< grant select,insert,update,delete on tsatt_v to public >>> define grant_xal_v <<< grant select,insert,update,delete on xal_v to public >>> define grant_xcf_v <<< grant select,insert,update,delete on xcf_v to public >>> define grant_xdf_v <<< grant select,insert,update,delete on xdf_v to public >>> define grant_config_v <<< grant select on config_v to public >>> define grant_rcver_v <<< grant select on rcver_v to public >>> define grant_vpc_users_v <<< grant select,update on vpc_users_v to public >>> define grant_vpc_databases_v <<< grant select on vpc_databases_v to public >>> define grant_cfs_v <<< grant select on cfs_v to public >>> define grant_bcr_v <<< grant select,insert,update,delete on bcr_v to public >>> define grant_rman_seq <<< grant select on rman_seq to public >>> define grant_dbms_rcvman <<< grant execute on dbms_rcvman to public >>> define grant_dbms_rcvcat <<< grant execute on dbms_rcvcat to public >>> define grant_rc_listBackupPipe <<< grant execute on rc_listBackupPipe to public >>> define grant_dbms_catowner <<< grant execute on dbms_catowner to public >>> define drop_al_v 
<<< drop view al_v >>> define drop_bcf_v <<< drop view bcf_v >>> define drop_bdf_v <<< drop view bdf_v >>> define drop_brl_v <<< drop view brl_v >>> define drop_ccf_v <<< drop view ccf_v >>> define drop_cdf_v <<< drop view cdf_v >>> define drop_ckp_v <<< drop view ckp_v >>> define drop_site_dfatt_v <<< drop view site_dfatt_v >>> define drop_df_v <<< drop view df_v >>> define drop_fb_v <<< drop view fb_v >>> define drop_grsp_v <<< drop view grsp_v >>> define drop_nrsp_v <<< drop view nrsp_v >>> define drop_offr_v <<< drop view offr_v >>> define drop_orl_v <<< drop view orl_v >>> define drop_rlh_v <<< drop view rlh_v >>> define drop_rr_v <<< drop view rr_v >>> define drop_rsr_v <<< drop view rsr_v >>> define drop_rt_v <<< drop view rt_v >>> define drop_site_tfatt_v <<< drop view site_tfatt_v >>> define drop_tf_v <<< drop view tf_v >>> define drop_tsatt_v <<< drop view tsatt_v >>> define drop_ts_v <<< drop view ts_v >>> define drop_xal_v <<< drop view xal_v >>> define drop_xcf_v <<< drop view xcf_v >>> define drop_xdf_v <<< drop view xdf_v >>> define drop_bp_v <<< drop view bp_v >>> define drop_bsf_v <<< drop view bsf_v >>> define drop_bs_v <<< drop view bs_v >>> define drop_conf_v <<< drop view conf_v >>> define drop_dbinc_v <<< drop view dbinc_v >>> define drop_db_v <<< drop view db_v >>> define drop_node_v <<< drop view node_v >>> define drop_rout_v <<< drop view rout_v >>> define drop_scr_v <<< drop view scr_v >>> define drop_bcb_v <<< drop view bcb_v >>> define drop_ccb_v <<< drop view ccb_v >>> define drop_scrl_v <<< drop view scrl_v >>> define drop_config_v <<< drop view config_v >>> define drop_rcver_v <<< drop view rcver_v >>> define drop_vpc_users_v <<< drop view vpc_users_v >>> define drop_vpc_databases_v <<< drop view vpc_databases_v >>> define drop_cfs_v <<< drop view cfs_v >>> define drop_bcr_v <<< drop view bcr_v >>> define drop_dbms_catowner <<< drop function dbms_catowner >>> define syn_al_v <<< create or replace synonym al for &co&.al_v >>> define syn_bcb_v <<< create or replace synonym bcb for &co&.bcb_v >>> define syn_bcf_v <<< create or replace synonym bcf for &co&.bcf_v >>> define syn_bdf_v <<< create or replace synonym bdf for &co&.bdf_v >>> define syn_bp_v <<< create or replace synonym bp for &co&.bp_v >>> define syn_brl_v <<< create or replace synonym brl for &co&.brl_v >>> define syn_bs_v <<< create or replace synonym bs for &co&.bs_v >>> define syn_bsf_v <<< create or replace synonym bsf for &co&.bsf_v >>> define syn_ccb_v <<< create or replace synonym ccb for &co&.ccb_v >>> define syn_ccf_v <<< create or replace synonym ccf for &co&.ccf_v >>> define syn_cdf_v <<< create or replace synonym cdf for &co&.cdf_v >>> define syn_vpc_users_v <<< create or replace synonym vpc_users for &co&.vpc_users_v >>> define syn_vpc_databases_v <<< create or replace synonym vpc_databases for &co&.vpc_databases_v >>> define syn_ckp_v <<< create or replace synonym ckp for &co&.ckp_v >>> define syn_conf_v <<< create or replace synonym conf for &co&.conf_v >>> define syn_config_v <<< create or replace synonym config for &co&.config_v >>> define syn_db_v <<< create or replace synonym db for &co&.db_v >>> define syn_dbinc_v <<< create or replace synonym dbinc for &co&.dbinc_v >>> define syn_dbms_rcvcat <<< create or replace synonym dbms_rcvcat for &co&.dbms_rcvcat >>> define syn_dbms_rcvman <<< create or replace synonym dbms_rcvman for &co&.dbms_rcvman >>> define syn_df_v <<< create or replace synonym df for &co&.df_v >>> define syn_site_dfatt_v <<< create or replace synonym site_dfatt for 
&co&.site_dfatt_v >>>
define syn_fb_v <<< create or replace synonym fb for &co&.fb_v >>>
define syn_grsp_v <<< create or replace synonym grsp for &co&.grsp_v >>>
define syn_nrsp_v <<< create or replace synonym nrsp for &co&.nrsp_v >>>
define syn_node_v <<< create or replace synonym node for &co&.node_v >>>
define syn_offr_v <<< create or replace synonym offr for &co&.offr_v >>>
define syn_orl_v <<< create or replace synonym orl for &co&.orl_v >>>
define syn_rcver_v <<< create or replace synonym rcver for &co&.rcver_v >>>
define syn_rlh_v <<< create or replace synonym rlh for &co&.rlh_v >>>
define syn_rman_seq <<< create or replace synonym rman_seq for &co&.rman_seq >>>
define syn_rout_v <<< create or replace synonym rout for &co&.rout_v >>>
define syn_rr_v <<< create or replace synonym rr for &co&.rr_v >>>
define syn_rsr_v <<< create or replace synonym rsr for &co&.rsr_v >>>
define syn_rt_v <<< create or replace synonym rt for &co&.rt_v >>>
define syn_scr_v <<< create or replace synonym scr for &co&.scr_v >>>
define syn_scrl_v <<< create or replace synonym scrl for &co&.scrl_v >>>
define syn_tf_v <<< create or replace synonym tf for &co&.tf_v >>>
define syn_site_tfatt_v <<< create or replace synonym site_tfatt for
&co&.site_tfatt_v >>>
define syn_ts_v <<< create or replace synonym ts for &co&.ts_v >>>
define syn_tsatt_v <<< create or replace synonym tsatt for &co&.tsatt_v >>>
define syn_xal_v <<< create or replace synonym xal for &co&.xal_v >>>
define syn_xcf_v <<< create or replace synonym xcf for &co&.xcf_v >>>
define syn_xdf_v <<< create or replace synonym xdf for &co&.xdf_v >>>
define syn_rc_listBackupPipe <<< create or replace synonym rc_listBackupPipe
for &co&.rc_listBackupPipe >>>
define syn_bcr_v <<< create or replace synonym bcr for &co&.bcr_v >>>

define syn_drop_all <<<
begin
  for s in (select * from user_synonyms
            where table_owner = &co&.dbms_catowner) loop
    execute immediate 'drop synonym ' || s.synonym_name;
  end loop;
end;
>>>

define config_update <<<
begin
  insert into config
    select 'compatible','080004' from dual
    where not exists (select * from config);
  commit;
end;
>>>

#
# Table to track the temporary resource that is allocated in the recovery
# catalog for the import catalog command.
#
# It is important to note that one should obtain a row lock on the object
# that is about to be dropped and then delete the row from tempres.
#
define tempres <<<
CREATE TABLE tempres
(
  name      VARCHAR2(32) NOT NULL, -- name of the temporary resource
  data_type VARCHAR2(32) NOT NULL, -- TABLE or DBLINK
  CONSTRAINT tempres_u1 UNIQUE(name)
) &tablespace&
>>>

#
# This is the template to create an idb (import db) table in the source
# recovery catalog database. The table name is randomly generated.
#
# The idb table is used with other tables in a conditional clause to filter
# the dbids of interest in the source recovery catalog database.
#
define idb <<<
CREATE TABLE &1&
(
  db_key NUMBER NOT NULL,
  db_id  NUMBER NOT NULL,
  CONSTRAINT &2&_p PRIMARY KEY (db_key),
  CONSTRAINT &3&_u1 UNIQUE(db_id)
)
>>>

#
# This is the template to create an idbinc (import dbinc) table in the source
# recovery catalog database. The table name is randomly generated.
#
# The idbinc table is used with other tables in a conditional clause to filter
# the dbids of interest in the source recovery catalog database.
#
define idbinc <<<
CREATE TABLE &1&
(
  dbinc_key NUMBER NOT NULL,
  CONSTRAINT &2&_p PRIMARY KEY(dbinc_key)
)
>>>
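#
# For illustration only (the table name rman_imp_4711 is hypothetical; real
# names are randomly generated at run time): after the &1&/&2&/&3&
# placeholders are substituted, the idb template above expands to a statement
# of this shape:
#
#   CREATE TABLE rman_imp_4711
#   (
#     db_key NUMBER NOT NULL,
#     db_id  NUMBER NOT NULL,
#     CONSTRAINT rman_imp_4711_p PRIMARY KEY (db_key),
#     CONSTRAINT rman_imp_4711_u1 UNIQUE(db_id)
#   )
#

#
# This is the template to create a dblink to the source recovery catalog
# database for the import catalog command. The dblink name is randomly
# generated.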
#The dblink is created in destination recovery catalog database. # define dblink <<< --BEGIN_PRINT-- -- CREATE DATABASE LINK ****** CONNECT TO ****** -- IDENTIFIED BY ****** USING ****** --END_PRINT-- CREATE DATABASE LINK &1& CONNECT TO &2& IDENTIFIED BY &3& USING &4& >>> # # Recovery Catalog views # # databases registered in recovery catalog # information about databases and their current incarnations define rc_database <<< create or replace view rc_database as select db.db_key, db.curr_dbinc_key dbinc_key, db.db_id dbid, dbinc.db_name name, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time from db, dbinc where db.curr_dbinc_key = dbinc.dbinc_key >>> # RMAN configuration for the database registered in recovery catalog # This view shows information about how to backup and restore databases define rc_rman_configuration <<< create or replace view rc_rman_configuration as select conf.db_key, conf.conf# conf#, conf.name name, conf.value value, conf.db_unique_name db_unique_name, conf.site_key from db, conf where db.db_key = conf.db_key >>> # database incarnations # information about all incarnations registered in recovery catalog define rc_database_incarnation <<< create or replace view rc_database_incarnation as select db.db_key, db.db_id dbid, cur.dbinc_key, cur.db_name name, cur.reset_scn resetlogs_change#, cur.reset_time resetlogs_time, decode(cur.dbinc_key, db.curr_dbinc_key, 'YES', 'NO') current_incarnation, cur.parent_dbinc_key, par.reset_scn prior_resetlogs_change#, par.reset_time prior_resetlogs_time, cur.dbinc_status status from db, dbinc cur, dbinc par where db.db_key = cur.db_key and (cur.parent_dbinc_key = par.dbinc_key) union select db.db_key, db.db_id dbid, dbinc.dbinc_key, dbinc.db_name name, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, decode(dbinc.dbinc_key, db.curr_dbinc_key, 'YES', 'NO') current_incarnation, to_number(null), to_number(null), to_date(null), dbinc.dbinc_status status from db, dbinc where db.db_key=dbinc.db_key and dbinc.parent_dbinc_key IS NULL -- get last incarnation >>> # recovery catalog resyncs (checkpoints) # information about recovery catalog resyncs # N.B. If you code any new uses for rc_resync, or change existing uses, make # sure that the code in dbms_rcvcat.cleanupCKP does not delete the rows you # need. define rc_resync <<< create or replace view rc_resync as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ckp.ckp_key resync_key, ckp.ckp_scn controlfile_change#, ckp.ckp_time controlfile_time, ckp.ckp_cf_seq controlfile_sequence#, ckp.cf_create_time controlfile_version, ckp.ckp_type resync_type, ckp.ckp_db_status db_status, ckp.resync_time from ckp, dbinc where ckp.dbinc_key = dbinc.dbinc_key >>> # rc_checkpoint is replaced by rc_resync, but is still used by some tests # N.B. If you code any new uses for rc_checkpoint, or change existing uses, # make sure that the code in dbms_rcvcat.cleanupCKP does not delete the rows # you need. 
define rc_checkpoint <<<
create or replace view rc_checkpoint as
select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name,
       ckp.ckp_key, ckp.ckp_scn, ckp.ckp_cf_seq, ckp.ckp_time, ckp.ckp_type
from ckp, dbinc
where ckp.dbinc_key = dbinc.dbinc_key
>>>

# tablespaces
# information about all tablespaces registered in recovery catalog
# Dropped tablespaces and tablespaces that belong to old database
# incarnations are also shown.
# The current value is shown for tablespace attributes.

define rc_tablespace <<<
create or replace view rc_tablespace as
select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ts.ts#, ts.ts_name name,
       ts.create_scn creation_change#, ts.create_time creation_time,
       ts.drop_scn drop_change#, ts.drop_time,
       ts.included_in_database_backup, ts.bigfile, ts.temporary,
       ts.encrypt_in_backup, ts.plugin_scn plugin_change#
from ts, dbinc
where dbinc.dbinc_key = ts.dbinc_key
>>>

# datafiles
# information about all datafiles registered in recovery catalog
# The datafiles are shown as dropped if their tablespace is dropped,
# similar to the rc_tablespace view.

define rc_datafile <<<
create or replace view rc_datafile as
select a.db_key, a.dbinc_key, a.db_name, a.ts#, a.tablespace_name, a.file#,
       a.creation_change#, a.creation_time, a.drop_change#, a.drop_time,
       a.bytes, a.blocks, a.block_size, site_dfatt.fname name,
       a.stop_change#, a.stop_time, a.read_only, a.rfile#,
       a.included_in_database_backup, a.aux_name, a.encrypt_in_backup,
       a.site_key, a.db_unique_name, a.foreign_dbid,
       a.foreign_create_scn foreign_creation_change#,
       a.foreign_create_time foreign_creation_time, a.plugged_readonly,
       a.plugin_scn plugin_change#,
       a.plugin_reset_scn plugin_resetlogs_change#,
       a.plugin_reset_time plugin_resetlogs_time,
       a.creation_thread, a.creation_size
from (select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ts.ts#,
             ts.ts_name tablespace_name, df.file#,
             df.create_scn creation_change#, df.create_time creation_time,
             df.drop_scn drop_change#, df.drop_time,
             df.blocks*df.block_size bytes, df.blocks, df.block_size,
             df.stop_scn stop_change#, df.stop_time, df.read_only read_only,
             df.rfile#, ts.included_in_database_backup,
             df.clone_fname aux_name, ts.encrypt_in_backup, df.df_key,
             node.site_key, node.db_unique_name, df.foreign_dbid,
             df.foreign_create_scn, df.foreign_create_time,
             df.plugged_readonly, df.plugin_scn, df.plugin_reset_scn,
             df.plugin_reset_time, df.create_thread creation_thread,
             df.create_size creation_size
      from dbinc, ts, df, node
      where dbinc.dbinc_key = ts.dbinc_key
        and ts.dbinc_key = df.dbinc_key
        and ts.ts# = df.ts#
        and ts.create_scn = df.ts_create_scn
        and node.db_key = dbinc.db_key) a, site_dfatt
where a.site_key = site_dfatt.site_key(+)
  and a.df_key = site_dfatt.df_key(+)
>>>

# tempfiles
# information about all tempfiles registered in recovery catalog
# The tempfiles are shown as dropped if their tablespace is dropped,
# similar to the rc_tablespace view.

define rc_tempfile <<<
create or replace view rc_tempfile as
select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ts.ts#,
       ts.ts_name tablespace_name, tf.file#,
       tf.create_scn creation_change#, tf.create_time creation_time,
       site_tfatt.drop_scn drop_change#, site_tfatt.drop_time,
       site_tfatt.blocks * tf.block_size bytes, site_tfatt.blocks,
       tf.block_size, site_tfatt.fname name, tf.rfile#,
       site_tfatt.autoextend, site_tfatt.max_size maxsize,
       site_tfatt.next_size nextsize, ts.bigfile, node.site_key,
       node.db_unique_name, ts.create_scn tablespace_creation_change#,
       ts.create_time tablespace_creation_time,
       ts.drop_scn tablespace_drop_change#,
       ts.drop_time tablespace_drop_time
from dbinc, ts,
tf, node, site_tfatt where dbinc.dbinc_key = ts.dbinc_key and ts.dbinc_key = tf.dbinc_key and ts.ts# = tf.ts# and ts.create_scn = tf.ts_create_scn and node.db_key = dbinc.db_key and tf.tf_key = site_tfatt.tf_key and node.site_key = site_tfatt.site_key >>> # information about redo threads define rc_redo_thread <<< create or replace view rc_redo_thread as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, rt.thread#, rt.status, rt.sequence#, rt.enable_scn enable_change#, rt.enable_time, rt.disable_scn disable_change#, rt.disable_time from dbinc, rt where dbinc.dbinc_key = rt.dbinc_key >>> # information about online redo logs define rc_redo_log <<< create or replace view rc_redo_log as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, orl.thread#, orl.group#, orl.fname name, orl.site_key, orl.bytes bytes, orl.type type from dbinc, orl where dbinc.dbinc_key = orl.dbinc_key >>> # information about redo log history define rc_log_history <<< create or replace view rc_log_history as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, rlh.rlh_recid recid, rlh.rlh_stamp stamp, rlh.thread#, rlh.sequence#, rlh.low_scn first_change#, rlh.low_time first_time, rlh.next_scn next_change#, decode(rlh.status, 'N', 'NO', 'Y', 'YES', NULL, NULL, '?') cleared -- rlh.next_time, -- rlh.blocks from dbinc, rlh where dbinc.dbinc_key = rlh.dbinc_key >>> # archived logs # information about all archivelogs define rc_archived_log <<< create or replace view rc_archived_log as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, al.al_key, al.al_recid recid, al.al_stamp stamp, al.fname name, al.thread#, al.sequence#, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, al.low_scn first_change#, al.low_time first_time, al.next_scn next_change#, al.next_time, al.blocks, al.block_size, al.completion_time, decode(al.archived, 'N', 'NO', 'Y', 'YES', '?') archived, al.status, decode(al.is_standby, 'Y', 'YES', 'NO') is_standby, al.dictionary_begin, al.dictionary_end, al.is_recovery_dest_file, al.compressed, al.creator, al.terminal, al.site_key from dbinc, al where dbinc.dbinc_key = al.dbinc_key >>> # backup sets define rc_backup_set <<< create or replace view rc_backup_set as select db.db_key, db.db_id, bs.bs_key, bs.bs_recid recid, bs.bs_stamp stamp, bs.set_stamp, bs.set_count, bs.bck_type backup_type, bs.incr_level incremental_level, bs.pieces, bs.start_time, bs.completion_time, abs((bs.completion_time - bs.start_time) * 86400) elapsed_seconds, bs.status, bs.controlfile_included, bs.input_file_scan_only, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, bs.block_size, bs.site_key, decode(bs.multi_section, 'Y', 'YES', 'NO') multi_section from db, bs where db.db_key = bs.db_key >>> # backup pieces define rc_backup_piece <<< create or replace view rc_backup_piece as select db.db_key, db.db_id, bp.bp_key, bp.bp_recid recid, bp.bp_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, bs.bck_type backup_type, bs.incr_level incremental_level, bp.piece#, bp.copy#, bp.device_type, bp.handle, bp.comments, bp.media, bp.media_pool, decode(bp.concur, 'N', 'NO', 'Y', 'YES', '?') concur, bp.tag, bp.start_time, bp.completion_time, abs((bp.completion_time - bp.start_time) * 86400) elapsed_seconds, bp.status, bp.bytes, bp.is_recovery_dest_file, bp.rsr_key, bp.compressed, bp.site_key, decode(bp.encrypted, 'N', 'NO', 'Y', 'YES', '?') encrypted, decode(bp.backed_by_osb, 'N', 'NO', 'Y', 'YES', '?') backed_by_osb from db, bs, bp where 
db.db_key = bs.db_key and bs.bs_key = bp.bs_key >>> # datafile backups (in backup sets) define rc_backup_datafile <<< create or replace view rc_backup_datafile as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, bdf.bdf_key, bdf.bdf_recid recid, bdf.bdf_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, bs.bs_recid, bs.bs_stamp, bs.bck_type backup_type, bdf.incr_level incremental_level, bdf.completion_time, bdf.file#, bdf.create_scn creation_change#, bdf.create_time creation_time, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, bdf.incr_scn incremental_change#, bdf.ckp_scn checkpoint_change#, bdf.ckp_time checkpoint_time, bdf.abs_fuzzy_scn absolute_fuzzy_change#, bdf.datafile_blocks, bdf.blocks, bdf.block_size, bs.status, bs.incr_level bs_level, bs.pieces, bdf.blocks_read, bdf.marked_corrupt, decode(bdf.used_chg_track, 'Y', 'YES', 'NO') used_change_tracking, decode(bdf.used_optim, 'Y', 'YES', 'NO') used_optimization, decode(bdf.used_optim, 'Y',round((100 *(bdf.datafile_blocks - bdf.blocks_read)) / bdf.datafile_blocks), NULL) pct_notread, bdf.foreign_dbid, bdf.plugged_readonly, bdf.plugin_scn plugin_change#, bdf.plugin_reset_scn plugin_resetlogs_change#, bdf.plugin_reset_time plugin_resetlogs_time, bdf.section_size from dbinc, bs, bdf where dbinc.dbinc_key = bdf.dbinc_key and bs.bs_key = bdf.bs_key and bs.bck_type != 'L' >>> # backup control files in backup sets define rc_backup_controlfile <<< create or replace view rc_backup_controlfile as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, bcf.bcf_key, bcf.bcf_recid recid, bcf.bcf_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, bcf.ckp_scn checkpoint_change#, bcf.ckp_time checkpoint_time, bcf.create_time creation_time, bcf.block_size, bcf.min_offr_recid oldest_offline_range, bs.status, bs_recid, bs_stamp, bs.incr_level bs_level, bs.completion_time, bcf.controlfile_type, bcf.blocks, bcf.autobackup_date, bcf.autobackup_sequence from dbinc, bs, bcf where dbinc.dbinc_key = bcf.dbinc_key and bs.bs_key = bcf.bs_key and bs.bck_type != 'L' >>> # backup of SPFILEs in backup sets define rc_backup_spfile <<< create or replace view rc_backup_spfile as select db.db_key, bsf.bsf_key, bsf.bsf_recid recid, bsf.bsf_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, bsf.modification_time, bs.status, bs_recid, bs_stamp, bs.completion_time, bsf.bytes, bsf.db_unique_name from db, bs, bsf where db.db_key = bsf.db_key and bs.bs_key = bsf.bs_key and bs.bck_type != 'L' >>> # datafile copies (on disk) define rc_datafile_copy <<< create or replace view rc_datafile_copy as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, cdf.cdf_key, cdf.cdf_recid recid, cdf.cdf_stamp stamp, cdf.fname name, cdf.tag, cdf.file#, cdf.create_scn creation_change#, cdf.create_time creation_time, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, cdf.incr_level incremental_level, cdf.ckp_scn checkpoint_change#, cdf.ckp_time checkpoint_time, cdf.abs_fuzzy_scn absolute_fuzzy_change#, cdf.rcv_fuzzy_scn recovery_fuzzy_change#, cdf.rcv_fuzzy_time recovery_fuzzy_time, decode(cdf.onl_fuzzy,'N', 'NO', 'Y', 'YES', '?') online_fuzzy, decode(cdf.bck_fuzzy,'N', 'NO', 'Y', 'YES', '?') backup_fuzzy, cdf.blocks, cdf.block_size, cdf.completion_time, cdf.status, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(cdf.scanned,'N', 'NO', 'Y', 'YES', '?') scanned, cdf.is_recovery_dest_file, 
cdf.rsr_key, cdf.marked_corrupt, cdf.site_key, cdf.foreign_dbid, cdf.plugged_readonly, cdf.plugin_scn plugin_change#, cdf.plugin_reset_scn plugin_resetlogs_change#, cdf.plugin_reset_time plugin_resetlogs_time from dbinc, cdf where dbinc.dbinc_key = cdf.dbinc_key >>> define rc_controlfile_copy <<< create or replace view rc_controlfile_copy as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ccf.ccf_key, ccf.ccf_recid recid, ccf.ccf_stamp stamp, ccf.fname name, ccf.tag, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, ccf.ckp_scn checkpoint_change#, ccf.ckp_time checkpoint_time, ccf.create_time creation_time, -- ccf.ckp_time checkpoint_time, ccf.blocks, ccf.block_size, ccf.min_offr_recid, ccf.min_offr_recid oldest_offline_range, ccf.completion_time, ccf.status, ccf.controlfile_type, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, ccf.is_recovery_dest_file, ccf.rsr_key, ccf.site_key from dbinc, ccf where dbinc.dbinc_key = ccf.dbinc_key >>> # redo log backups (in backup sets) define rc_backup_redolog <<< create or replace view rc_backup_redolog as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, brl.brl_key, brl.brl_recid recid, brl.brl_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, bs.bck_type backup_type, bs.completion_time, brl.thread#, brl.sequence#, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, brl.low_scn first_change#, brl.low_time first_time, brl.next_scn next_change#, brl.next_time, brl.blocks, brl.block_size, bs.status, bs_recid, bs_stamp, bs.pieces, brl.terminal from dbinc, bs, brl where dbinc.dbinc_key = brl.dbinc_key and bs.bs_key = brl.bs_key and bs.bck_type = 'L' >>> # corrupt blocks in datafile backups and copies define rc_backup_corruption <<< create or replace view rc_backup_corruption as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, bcb.bcb_recid recid, bcb.bcb_stamp stamp, bs.bs_key, bs.set_stamp, bs.set_count, bcb.piece#, bdf.bdf_key, bdf.bdf_recid, bdf.bdf_stamp, bdf.file#, bdf.create_scn creation_change#, bcb.block#, bcb.blocks, bcb.corrupt_scn corruption_change#, decode(bcb.marked_corrupt,'N', 'NO', 'Y', 'YES', '?') marked_corrupt, bcb.corruption_type from dbinc, bs, bdf, bcb where dbinc.dbinc_key = bdf.dbinc_key and bs.bs_key = bdf.bs_key and bdf.bdf_key = bcb.bdf_key >>> define rc_copy_corruption <<< create or replace view rc_copy_corruption as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, ccb.ccb_recid recid, ccb.ccb_stamp stamp, cdf.cdf_key, cdf.cdf_recid copy_recid, cdf.cdf_stamp copy_stamp, cdf.file#, cdf.create_scn creation_change#, ccb.block#, ccb.blocks, ccb.corrupt_scn corruption_change#, decode(ccb.marked_corrupt,'N', 'NO', 'Y', 'YES', '?') marked_corrupt, ccb.corruption_type from dbinc, cdf, ccb where dbinc.dbinc_key = cdf.dbinc_key and cdf.cdf_key = ccb.cdf_key and cdf.status = 'A' >>> # offline ranges for datafiles define rc_offline_range <<< create or replace view rc_offline_range as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, offr.offr_recid recid, offr.offr_stamp stamp, offr.file#, offr.create_scn creation_change#, offr.offline_scn offline_change#, offr.online_scn online_change#, offr.online_time, offr.cf_create_time from dbinc, offr where dbinc.dbinc_key = offr.dbinc_key >>> # stored scripts define rc_stored_script <<< create or replace view rc_stored_script as select db.db_key, nvl(dbinc.db_name, 'GLOBAL') db_name, scr.scr_name script_name, scr.scr_comment script_comment from db, 
dbinc, scr where dbinc.dbinc_key(+) = db.curr_dbinc_key and db.db_key(+) = scr.db_key >>> define rc_stored_script_line <<< create or replace view rc_stored_script_line as select db.db_key, scr.scr_name script_name, scrl.linenum line, scrl.text from db, scr, scrl where db.db_key(+) = scr.db_key and scr.scr_key = scrl.scr_key >>> # proxy copies define rc_proxy_datafile <<< create or replace view rc_proxy_datafile as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, xdf.xdf_key, xdf.xdf_recid recid, xdf.xdf_stamp stamp, xdf.tag, xdf.file#, xdf.create_scn creation_change#, xdf.create_time creation_time, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, xdf.incr_level incremental_level, xdf.ckp_scn checkpoint_change#, xdf.ckp_time checkpoint_time, xdf.abs_fuzzy_scn absolute_fuzzy_change#, xdf.rcv_fuzzy_scn recovery_fuzzy_change#, xdf.rcv_fuzzy_time recovery_fuzzy_time, decode(xdf.onl_fuzzy,'N', 'NO', 'Y', 'YES', '?') online_fuzzy, decode(xdf.bck_fuzzy,'N', 'NO', 'Y', 'YES', '?') backup_fuzzy, xdf.blocks, xdf.block_size, xdf.device_type, xdf.handle, xdf.comments, xdf.media, xdf.media_pool, xdf.start_time, xdf.completion_time, abs((xdf.completion_time - xdf.start_time) * 86400) elapsed_seconds, xdf.status, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, xdf.rsr_key, xdf.site_key, xdf.foreign_dbid, xdf.plugged_readonly, xdf.plugin_scn plugin_change#, xdf.plugin_reset_scn plugin_resetlogs_change#, xdf.plugin_reset_time plugin_resetlogs_time from dbinc, xdf where dbinc.dbinc_key = xdf.dbinc_key >>> define rc_proxy_controlfile <<< create or replace view rc_proxy_controlfile as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, xcf.xcf_key, xcf.xcf_recid recid, xcf.xcf_stamp stamp, xcf.tag, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, xcf.ckp_scn checkpoint_change#, xcf.ckp_time checkpoint_time, xcf.create_time creation_time, xcf.block_size, xcf.blocks, xcf.min_offr_recid, xcf.min_offr_recid oldest_offline_range, xcf.device_type, xcf.handle, xcf.comments, xcf.media, xcf.media_pool, xcf.start_time, xcf.completion_time, abs((xcf.completion_time - xcf.start_time) * 86400) elapsed_seconds, xcf.status, xcf.controlfile_type, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, xcf.rsr_key, xcf.site_key from dbinc, xcf where dbinc.dbinc_key = xcf.dbinc_key >>> define rc_proxy_archivedlog <<< create or replace view rc_proxy_archivedlog as select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name, xal.xal_key, xal.xal_recid recid, xal.xal_stamp stamp, xal.tag, xal.device_type, xal.handle, xal.comments, xal.media, xal.media_pool, xal.status, xal.thread#, xal.sequence#, dbinc.reset_scn resetlogs_change#, dbinc.reset_time resetlogs_time, xal.low_scn first_change#, xal.low_time first_time, xal.next_scn next_change#, xal.next_time, xal.blocks, xal.block_size, xal.start_time, xal.completion_time, abs((xal.completion_time - xal.start_time) * 86400) elapsed_seconds, xal.rsr_key, xal.terminal, decode(keep_options, 0, 'NO', 'YES') keep, keep_until, decode(keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, xal.site_key from dbinc, xal where dbinc.dbinc_key = xal.dbinc_key >>> # This view displays the data block corruptions that currently exist in the # database. 
# It does this by combining the blocks that were found during the creation of
# datafile copies and datafile backup sets (the 'outer' query), and then
# filtering out those rows for which some subsequent backup or copy of the
# same datafile has occurred (the 'not exists' query). When considering
# subsequent datafile copies, the only ones that can invalidate a given
# corruption row are those which were created by a full scan of the file.

define rc_database_block_corruption <<<
create or replace view rc_database_block_corruption as
select node.db_key db_key, df.dbinc_key dbinc_key, df.file# file#,
       bcr.block# block#, bcr.blocks blocks,
       bcr.corrupt_scn corruption_change#,
       bcr.corruption_type corruption_type
from bcr, df,
     (select dbinc_key from dbinc where dbinc_status = 'CURRENT') dbinc,
     (select distinct db_key, site_key from node
      where bcr_in_use = 'YES') node
where bcr.df_key = df.df_key
  and node.site_key = bcr.site_key
  and df.drop_scn is null
  and df.dbinc_key = dbinc.dbinc_key
union all
select distinct outer.db_key db_key, outer.dbinc_key dbinc_key,
       outer.file# file#, outer.block# block#, outer.blocks blocks,
       outer.corruption_change# corruption_change#,
       outer.corruption_type corruption_type
from (select db_key, dbinc_key, file#, block#, blocks, corruption_change#,
             copy_stamp stamp, corruption_type
      from rc_copy_corruption
      union
      select bs.db_key, dbinc_key, file#, block#, blocks,
             corruption_change#, bs.stamp, corruption_type
      from rc_backup_corruption bc, rc_backup_set bs
      where bc.bs_key = bs.bs_key) outer,
     (select distinct db_key from node where bcr_in_use = 'NO') node
where outer.db_key = node.db_key
  and not exists
      (select 1 from rc_datafile_copy
       where outer.db_key = db_key
         and outer.dbinc_key = dbinc_key
         and scanned = 'YES'
         and outer.file# = file#
         and outer.stamp < stamp
       union
       select 1 from rc_backup_datafile bdf, rc_backup_set bs
       where bdf.bs_key = bs.bs_key
         and outer.db_key = bdf.db_key
         and outer.dbinc_key = bdf.dbinc_key
         and outer.file# = file#
         and outer.stamp < bs.stamp
         and (datafile_blocks = blocks_read
              or (nvl(bdf.incremental_level,0) = 0
                  and used_optimization='YES')))
>>>

define drop_rc_lbRecVar_t <<< drop type rc_lbRecVar_t >>>
define drop_rc_lbRecSet_t <<< drop type rc_lbRecSet_t >>>
define drop_rc_lbRec_t <<< drop type rc_lbRec_t >>>
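#
# Illustrative query (a sketch, assuming a catalog-owner session and a
# hypothetical db_key of 1): list the corrupt block ranges the view reports
# for one database.
#
#   select file#, block#, blocks, corruption_change#, corruption_type
#     from rc_database_block_corruption
#    where db_key = 1
#    order by file#, block#;
#

# The obsolete column is at position 19 in this object, and the object
# implementation performs some optimization based on whether the user
# selected the obsolete column (see the Fetch function). If you add an
# element to this object before position 19, you must also fix the Fetch
# function.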
define rc_lbRec_t <<<
create or replace type rc_lbRec_t authid current_user as object
(
  list_order1            NUMBER,
  list_order2            NUMBER,
  pkey                   NUMBER,
  backup_type            VARCHAR2(32),
  file_type              VARCHAR2(32),
  keep                   VARCHAR2(3),
  keep_until             DATE,
  keep_options           VARCHAR2(13),
  status                 VARCHAR2(16),
  fname                  VARCHAR2(1024),
  tag                    VARCHAR2(32),
  media                  VARCHAR2(80),
  recid                  NUMBER,
  stamp                  NUMBER,
  device_type            VARCHAR2(255),
  block_size             NUMBER,
  completion_time        DATE,
  is_rdf                 VARCHAR2(3),
  compressed             VARCHAR2(3),
  obsolete               VARCHAR2(3),
  bytes                  NUMBER,
  bs_key                 NUMBER,
  bs_count               NUMBER,
  bs_stamp               NUMBER,
  bs_type                VARCHAR2(32),
  bs_incr_type           VARCHAR2(32),
  bs_pieces              NUMBER,
  bs_copies              NUMBER,
  bs_completion_time     DATE,
  bs_status              VARCHAR2(16),
  bs_bytes               NUMBER,
  bs_compressed          VARCHAR2(3),
  bs_tag                 VARCHAR2(1024),
  bs_device_type         VARCHAR2(255),
  bp_piece#              NUMBER,
  bp_copy#               NUMBER,
  df_file#               NUMBER,
  df_tablespace          VARCHAR2(30),
  df_resetlogs_change#   NUMBER,
  df_creation_change#    NUMBER,
  df_checkpoint_change#  NUMBER,
  df_ckp_mod_time        DATE,
  df_incremental_change# NUMBER,
  rl_thread#             NUMBER,
  rl_sequence#           NUMBER,
  rl_resetlogs_change#   NUMBER,
  rl_first_change#       NUMBER,
  rl_first_time          DATE,
  rl_next_change#        NUMBER,
  rl_next_time           DATE
)
>>>

define rc_lbRecSet_t <<<
create or replace type rc_lbRecSet_t as table of rc_lbRec_t
>>>

define drop_rc_lbRecSetImpl_t <<<
drop type rc_lbRecSetImpl_t
>>>

# ----------------------------------------------------------------------------
# rc_lbRecSetImpl_t                                                          --
# NOTE!! NOTE!! NOTE!!                                                       --
# There are two versions of the object implementation for the %%_backup_files
# view: one for the recovery catalog and another for the target database.   --
# If there is any change in logic, it must be reflected in both places.     --
# ----------------------------------------------------------------------------
define rc_lbRecSetImpl_t <<<
create or replace type rc_lbRecSetImpl_t authid current_user as object
(
  curval       number,   -- current rownum
  done         number,   -- done with the query
  needobsolete number,   -- user interested in obsolete col
  static function ODCITableStart(sctx IN OUT rc_lbRecSetImpl_t)
    return number,
  member function ODCITableFetch(self IN OUT rc_lbRecSetImpl_t,
                                 nrows IN number,
                                 objSet OUT rc_lbRecSet_t) return number,
  member function ODCITableClose(self IN rc_lbRecSetImpl_t) return number
)
>>>

define rc_lbRecSetImplbody_t <<<
create or replace type body rc_lbRecSetImpl_t is

-- Get the version of ODCITablePrepare from the target database once the
-- lowest compatibility is greater than 10.1. This restriction is required
-- because the function was first implemented in 10.1.

static function ODCITableStart(sctx IN OUT rc_lbRecSetImpl_t) return number is
begin
  -- instantiate the object and initialise curval, done and needobsolete.
  sctx:=rc_lbRecSetImpl_t(0, 0, 1);
  return SYS.ODCIConst.Success;
end ODCITableStart;

-- The Fetch function is not called more than once. It returns all rows the
-- first time it is called for each query, because we cannot have package
-- composite types within an object definition. For the same reason, the
-- nrows parameter is ignored.
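--
-- Usage sketch (editorial, assuming the catalog objects defined later in this
-- script are in place): the rows produced by this implementation are normally
-- consumed through the rc_listBackupPipe pipelined function and the
-- rc_backup_files view, e.g.
--
--   select pkey, backup_type, file_type, status, obsolete
--   from table(rc_listBackupPipe);
--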
member function ODCITableFetch(self IN OUT rc_lbRecSetImpl_t,
                               nrows IN number,
                               objSet OUT rc_lbRecSet_t) return number is
  n               number  := 0;
  firstCall       boolean := TRUE;
  ret             boolean := TRUE;
  redundancy      number;
  recovery_window number;
  untilTime       date;
  lbRec           dbms_rcvman.lbrec_t;
  lbCursor        dbms_rcvman.lbCursor_t;
  lbState         dbms_rcvman.lbState_t;
begin
  objSet:=rc_lbRecSet_t();
  -- reset package state
  dbms_rcvman.resetAll;
  redundancy := 1;
  recovery_window := 0;
  -- We need to get the retention policy, and to set untilTime if the
  -- retention policy is recovery_window.
  -- Get retention policy (recovery window and redundancy).
  dbms_rcvman.getRetentionPolicy(recovery_window, redundancy);
  -- Always work with all incarnations.
  dbms_rcvman.setAllIncarnations(TRUE);
  -- Set untilTime and untilSCN for recovery window (if any).
  if (recovery_window > 0) then
    SELECT (sysdate-recovery_window) INTO untilTime from dual;
    dbms_rcvman.setUntilTime(untilTime);
  end if;
  dbms_rcvman.setDeviceTypeAny;
  if (recovery_window = 0 and redundancy = 0) then
    -- don't need obsolete data if the policy is NONE
    dbms_rcvman.setNeedObsoleteData(false);
  else
    if self.needobsolete = 1 then
      dbms_rcvman.setNeedObsoleteData(true);
    else
      dbms_rcvman.setNeedObsoleteData(false);
    end if;
  end if;
  while ret and self.done = 0 loop
    ret := dbms_rcvman.listBackup(lbRec, firstCall, FALSE, redundancy, TRUE,
                                  lbCursor, lbState, null);
    if (lbRec.pkey is not null) then
      objSet.extend;
      n := n + 1;
      objSet(n):= rc_lbRec_t(
        to_number(null),  -- list_order1
        to_number(null),  -- list_order2
        to_number(null),  -- pkey
        to_char(null),    -- backup_type
        to_char(null),    -- file_type
        to_char(null),    -- keep
        to_date(null),    -- keep_until
        to_char(null),    -- keep_options
        to_char(null),    -- status
        to_char(null),    -- fname
        to_char(null),    -- tag
        to_char(null),    -- media
        to_number(null),  -- recid
        to_number(null),  -- stamp
        to_char(null),    -- device_type
        to_number(null),  -- block_size
        to_date(null),    -- completion_time
        to_char(null),    -- is_rdf
        to_char(null),    -- compressed
        to_char(null),    -- obsolete
        to_number(null),  -- bytes
        to_number(null),  -- bs_key
        to_number(null),  -- bs_count
        to_number(null),  -- bs_stamp
        to_char(null),    -- bs_type
        to_char(null),    -- bs_incr_type
        to_number(null),  -- bs_pieces
        to_number(null),  -- bs_copies
        to_date(null),    -- bs_completion_time
        to_char(null),    -- bs_status
        to_number(null),  -- bs_bytes
        to_char(null),    -- bs_compressed
        to_char(null),    -- bs_tag
        to_char(null),    -- bs_device_type
        to_number(null),  -- bp_piece#
        to_number(null),  -- bp_copy#
        to_number(null),  -- df_file#
        to_char(null),    -- df_tablespace
        to_number(null),  -- df_resetlogs_change#
        to_number(null),  -- df_creation_change#
        to_number(null),  -- df_checkpoint_change#
        to_date(null),    -- df_ckp_mod_time
        to_number(null),  -- df_incremental_change#
        to_number(null),  -- rl_thread#
        to_number(null),  -- rl_sequence#
        to_number(null),  -- rl_resetlogs_change#
        to_number(null),  -- rl_first_change#
        to_date(null),    -- rl_first_time
        to_number(null),  -- rl_next_change#
        to_date(null));   -- rl_next_time
      objSet(n).list_order1 := lbRec.list_order1;
      objSet(n).list_order2 := lbRec.list_order2;
      objSet(n).pkey := lbRec.pkey;
      objSet(n).backup_type := lbRec.backup_type;
      objSet(n).file_type := lbRec.file_type;
      objSet(n).keep := lbRec.keep;
      objSet(n).keep_until := lbRec.keep_until;
      objSet(n).keep_options := lbRec.keep_options;
      objSet(n).status := lbRec.status;
      objSet(n).fname := lbRec.fname;
      objSet(n).tag := lbRec.tag;
      objSet(n).media := lbRec.media;
      objSet(n).recid := lbRec.recid;
      objSet(n).stamp := lbRec.stamp;
      objSet(n).device_type := lbRec.device_type;
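      -- The remaining lbRec fields are copied one-for-one into the object
      -- row; no transformation is applied.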
      objSet(n).block_size := lbRec.block_size;
      objSet(n).completion_time := lbRec.completion_time;
      objSet(n).is_rdf := lbRec.is_rdf;
      objSet(n).compressed := lbRec.compressed;
      objSet(n).obsolete := lbRec.obsolete;
      objSet(n).bytes := lbRec.bytes;
      objSet(n).bs_key := lbRec.bs_key;
      objSet(n).bs_count := lbRec.bs_count;
      objSet(n).bs_stamp := lbRec.bs_stamp;
      objSet(n).bs_type := lbRec.bs_type;
      objSet(n).bs_incr_type := lbRec.bs_incr_type;
      objSet(n).bs_pieces := lbRec.bs_pieces;
      objSet(n).bs_copies := lbRec.bs_copies;
      objSet(n).bs_completion_time := lbRec.bs_completion_time;
      objSet(n).bs_status := lbRec.bs_status;
      objSet(n).bs_bytes := lbRec.bs_bytes;
      objSet(n).bs_compressed := lbRec.bs_compressed;
      objSet(n).bs_tag := lbRec.bs_tag;
      objSet(n).bs_device_type := lbRec.bs_device_type;
      objSet(n).bp_piece# := lbRec.bp_piece#;
      objSet(n).bp_copy# := lbRec.bp_copy#;
      objSet(n).df_file# := lbRec.df_file#;
      objSet(n).df_tablespace := lbRec.df_tablespace;
      objSet(n).df_resetlogs_change# := lbRec.df_resetlogs_change#;
      objSet(n).df_creation_change# := lbRec.df_creation_change#;
      objSet(n).df_checkpoint_change# := lbRec.df_checkpoint_change#;
      objSet(n).df_ckp_mod_time := lbRec.df_ckp_mod_time;
      objSet(n).df_incremental_change# := lbRec.df_incremental_change#;
      objSet(n).rl_thread# := lbRec.rl_thread#;
      objSet(n).rl_sequence# := lbRec.rl_sequence#;
      objSet(n).rl_resetlogs_change# := lbRec.rl_resetlogs_change#;
      objSet(n).rl_first_change# := lbRec.rl_first_change#;
      objSet(n).rl_first_time := lbRec.rl_first_time;
      objSet(n).rl_next_change# := lbRec.rl_next_change#;
      objSet(n).rl_next_time := lbRec.rl_next_time;
    end if;
    firstCall := false;
    self.curval:=self.curval+1;
    if not ret then
      self.done := 1;
    end if;
  end loop;
  return SYS.ODCIConst.Success;
end ODCITableFetch;

member function ODCITableClose(self IN rc_lbRecSetImpl_t) return number is
begin
  return SYS.ODCIConst.Success;
end ODCITableClose;
end;
>>>

define drop_rc_listBackupPipe <<<
drop function rc_listBackupPipe
>>>

define rc_listBackupPipe <<<
CREATE OR REPLACE FUNCTION rc_listBackupPipe RETURN rc_lbRecSet_t
PIPELINED using rc_lbRecSetImpl_t;
>>>

#
# View for listing all backups. The view is based on the rc_listBackupPipe
# pipelined function (dbms_rcvman.listBackupPipe in the target database
# version). The output consists of rows of type lbRec_t.
# NOTE: The following elements from lbRec_t should not be in the view:
# - is_rdf
# - list_order
# - df_incremental_change#

define rc_backup_files <<<
create or replace view rc_backup_files as
select pkey, backup_type, file_type, keep, keep_until, keep_options,
       status, fname, tag, media, recid, stamp, device_type, block_size,
       completion_time, compressed, obsolete, bytes,
       bs_key, bs_count, bs_stamp, bs_type, bs_incr_type, bs_pieces,
       bs_copies, bs_completion_time, bs_status, bs_bytes, bs_compressed,
       bs_tag, bs_device_type, bp_piece#, bp_copy#,
       df_file#, df_tablespace, df_resetlogs_change#, df_creation_change#,
       df_checkpoint_change#, df_ckp_mod_time,
       rl_thread#, rl_sequence#, rl_resetlogs_change#, rl_first_change#,
       rl_first_time, rl_next_change#, rl_next_time
from TABLE(rc_listBackupPipe)
>>>

# The view rc_rman_status represents the history of RMAN jobs
define rc_rman_status <<<
create or replace view rc_rman_status as
select dbinc.db_key, dbinc.dbinc_key, dbinc.db_name,
       rsr.rsr_recid recid, rsr.rsr_stamp stamp, rsr.rsr_key rsr_key,
       rsr.rsr_pkey parent_key, nvl(rsr.rsr_l0key, rsr.rsr_key) session_key,
       rsr.rsr_type row_type, rsr.rsr_level row_level,
       rsr.rsr_oper operation, rsr.rsr_status status,
       rsr.rsr_cmdid command_id, rsr.rsr_mbytes mbytes_processed,
       rsr.rsr_start start_time, rsr.rsr_end end_time,
       nvl(rsr.rsr_l0key, rsr.rsr_key) job_key,
       rsr.rsr_ibytes input_bytes, rsr.rsr_obytes output_bytes,
       rsr.rsr_optimized optimized, rsr.rsr_otype object_type,
       rsr.rsr_srecid session_recid, rsr.rsr_sstamp session_stamp,
       rsr.rsr_odevtype output_device_type, rsr.site_key site_key,
       decode(rsr.rsr_osb_allocated, 'Y', 'YES', 'NO') osb_allocated
from dbinc, rsr
where dbinc.dbinc_key = rsr.dbinc_key
>>>

define drop_rc_rout <<<
drop table rout
>>>

define drop_rc_rman_output <<<
drop view rc_rman_output
>>>

# The view rc_rman_output represents the RMAN output
define rc_rman_output <<<
create or replace view rc_rman_output as
select db_key, rsr_key, rout_skey session_key, rout_recid recid,
       rout_stamp stamp, rout_text output
from rout
>>>

# Define view to get subjob details
define rc_rman_backup_subjob_details <<<
create or replace view RC_RMAN_BACKUP_SUBJOB_DETAILS as
select a.*,
       decode(nvl(b.autocnt,0), 0, 'NO', 'YES') autobackup_done,
       decode(status_weight, 2000, 'FAILED', 1900, 'RUNNING WITH ERRORS',
              1500, 'RUNNING WITH WARNINGS', 1001, 'RUNNING',
              900, 'COMPLETED WITH ERRORS', 500, 'COMPLETED WITH WARNINGS',
              001, 'COMPLETED', 'FAILED') status,
       decode(input_type_weight,9, 'DB FULL', 8, 'RECVR AREA', 7, 'DB INCR',
              6, 'DATAFILE FULL', 5, 'DATAFILE INCR', 4, 'ARCHIVELOG',
              3, 'CONTROLFILE', 2, 'SPFILE', 1, 'BACKUPSET', null) input_type,
       decode(optimized_weight, 1, 'YES', 'NO') optimized,
       nvl(b.autocnt,0) autobackup_count,
       case when input_bytes/decode(output_bytes,0,null,output_bytes)>1
            then input_bytes/decode(output_bytes,0,null,output_bytes)
            else 1 end compression_ratio,
       dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display,
       dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display
from (
select unique db_key, db_name, session_key, session_recid, session_stamp,
       operation, command_id,
       min(start_time) over (partition by session_key, operation) start_time,
       max(end_time) over (partition by session_key, operation) end_time,
       sum(input_bytes) over (partition by session_key, operation)
          input_bytes,
       sum(output_bytes) over (partition by session_key, operation)
          output_bytes,
       max(status_weight) over (partition by session_key, operation)
          status_weight,
       max(optimized_weight) over (partition by session_key, operation)
optimized_weight, max(input_type_weight) over (partition by session_key, operation) input_type_weight, decode(count(distinct output_device_type) over (partition by session_key, operation),1, first_value(output_device_type) over (partition by session_key, operation),0, null, '*') output_device_type, decode(count(distinct osb_allocated) over (partition by session_key, operation),1, first_value(osb_allocated) over (partition by session_key, operation),0, 'NO', '*') backed_by_osb from (select d.*, decode(status, 'RUNNING', 1001, 'RUNNING WITH WARNINGS', 1500, 'RUNNING WITH ERRORS', 1900, 'COMPLETED', 0001, 'COMPLETED WITH WARNINGS', 500, 'COMPLETED WITH ERRORS', 900, 'FAILED', 2000, 2000) status_weight, decode(optimized,'YES', 1, 0) optimized_weight, decode(object_type, 'DB FULL', 9, 'RECVR AREA', 8, 'DB INCR', 7, 'DATAFILE FULL', 6, 'DATAFILE INCR', 5, 'ARCHIVELOG', 4, 'CONTROLFILE', 3, 'SPFILE', 2, 'BACKUPSET', 1, 0) input_type_weight from rc_rman_status d where operation like 'BACKUP%' and row_level=1)) a, ( select session_key, count(*) autocnt from rc_rman_status where operation like '%AUTOBACKUP%' and row_level > 1 group by session_key ) b where a.session_key=b.session_key (+) >>> #define views to get job details define rc_rman_backup_job_details <<< create or replace view RC_RMAN_BACKUP_JOB_DETAILS as select a.*, dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display, dbms_rcvman.Num2DisplaySize(input_bytes_per_sec) input_bytes_per_sec_display, dbms_rcvman.Num2DisplaySize(output_bytes_per_sec) output_bytes_per_sec_display, dbms_rcvman.Sec2DisplayTime(elapsed_seconds) time_taken_display from (select unique a.*, decode(autobackup_count, 0, 'NO', 'YES') autobackup_done, decode(status_weight, 2000, 'FAILED', 1900, 'RUNNING WITH ERRORS', 1500, 'RUNNING WITH WARNINGS', 1001, 'RUNNING', 900, 'COMPLETED WITH ERRORS', 500, 'COMPLETED WITH WARNINGS', 001, 'COMPLETED', 'FAILED') status, decode(input_type_weight,9, 'DB FULL', 8, 'RECVR AREA', 7, 'DB INCR', 6, 'DATAFILE FULL', 5, 'DATAFILE INCR', 4, 'ARCHIVELOG', 3, 'CONTROLFILE', 2, 'SPFILE', 1, 'BACKUPSET', null) input_type, decode(optimized_weight, 1, 'YES', 'NO') optimized, abs(a.end_time-a.start_time)*86400 elapsed_seconds, case when a.input_bytes/decode(a.output_bytes,0,null,a.output_bytes)>1 then a.input_bytes/decode(a.output_bytes,0,null,a.output_bytes) else 1 end compression_ratio, a.input_bytes/(decode(a.end_time-a.start_time, 0, 1, abs(a.end_time-a.start_time))*86400) input_bytes_per_sec, a.output_bytes/(decode(a.end_time-a.start_time, 0, 1, abs(a.end_time-a.start_time))*86400) output_bytes_per_sec from (select db_key, db_name, session_key, session_recid, session_stamp, command_id, min(start_time) over (partition by session_key) start_time, max(end_time) over (partition by session_key) end_time, sum(input_bytes) over (partition by session_key) input_bytes, sum(output_bytes) over (partition by session_key) output_bytes, max(status_weight) over (partition by session_key)status_weight, max(optimized_weight) over (partition by session_key) optimized_weight, max(input_type_weight) over (partition by session_key) input_type_weight, decode(count(distinct output_device_type) over (partition by session_key),1, first_value(output_device_type) over (partition by session_key),0, null, '*') output_device_type, sum(autobackup_count) over (partition by session_key) autobackup_count, backed_by_osb from RC_RMAN_BACKUP_SUBJOB_DETAILS) a) a >>> #define views to get backup set details 
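# Note (editorial): rc_backup_set_details and the other *_details views below
# join scalar subqueries of the form
#   (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual)
# so that, when a session key or a from/until time range has been set through
# dbms_rcvman, rows are restricted to that session or time range; when those
# package values are null, no session filtering takes place.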
define rc_backup_set_details <<< create or replace view RC_BACKUP_SET_DETAILS as select unique b.session_key, b.session_recid, b.session_stamp, a.db_key, f.db_name, a.bs_key, a.RECID, a.stamp, a.set_stamp, a.set_count, a.backup_type, a.controlfile_included, a.incremental_level, a.pieces, a.start_time, a.completion_time, a.elapsed_seconds, a.block_size, a.keep, a.keep_until, a.keep_options, a.device_type, a.compressed, a.num_copies, a.output_bytes, a.original_input_bytes, case when a.compression_ratio>1 then a.compression_ratio else 1 end compression_ratio, 'A' status, a.original_inprate_bytes, a.output_rate_bytes, dbms_rcvman.Num2DisplaySize(original_input_bytes) original_input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display, dbms_rcvman.Num2DisplaySize(original_inprate_bytes) original_inprate_bytes_display, dbms_rcvman.Num2DisplaySize(output_rate_bytes) output_rate_bytes_display, dbms_rcvman.Sec2DisplayTime(elapsed_seconds) time_taken_display, a.encrypted, a.backed_by_osb from ( select unique a.*, b.rsr_key, decode(b.devcnt, 1, first_value(b.device_type) over (partition by b.bs_key), '*') device_type, b.compressed, count(distinct copy#) over (partition by b.bs_key) num_copies, b.output_bytes output_bytes, c.original_input_bytes, c.original_input_bytes / (decode(b.output_bytes,0,c.original_input_bytes, b.output_bytes)) compression_ratio, c.original_input_bytes/ (decode(a.elapsed_seconds, 0, 1, a.elapsed_seconds)) original_inprate_bytes, b.output_bytes/ (decode(a.elapsed_seconds, 0, 1, a.elapsed_seconds)) output_rate_bytes, b.encrypted, b.backed_by_osb from rc_backup_set a, (select bs_key, device_type, status, count(distinct device_type) over (partition by bs_key)devcnt, compressed, sum(bytes) over (partition by bs_key, copy#) output_bytes, copy#, rsr_key, count(piece#) over (partition by bs_key, copy#) npieces, encrypted, backed_by_osb from rc_backup_piece where status = 'A') b, ( select bs_key, sum(original_input_bytes) original_input_bytes from ( select bs_key, sum((datafile_blocks+1)*block_size) over (partition by bs_key) original_input_bytes from rc_backup_datafile union select bs_key, sum((blocks+1)*block_size) over (partition by bs_key) original_input_bytes from rc_backup_controlfile union select bs_key, sum(bytes) over (partition by bs_key) original_input_bytes from rc_backup_spfile ) group by bs_key union select bs_key, sum((blocks+1)*block_size) over (partition by bs_key) original_input_bytes from rc_backup_redolog ) c where a.bs_key=b.bs_key and a.bs_key=c.bs_key and a.pieces=b.npieces ) a, (select session_key, session_recid, session_stamp, recid, stamp, rsr_key, start_time, end_time, db_key, db_name from rc_rman_status) b, (select db_key, name "DB_NAME" from rc_database) f, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.rsr_key = b.rsr_key (+) and a.db_key = f.db_key and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) >>> #define views to get backup piece details -- output_bytes does not include #header block and piece directory structures. 
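# Illustrative ad hoc query (editorial sketch, not shipped code): piece-level
# rows can be tied back to the RMAN session that created them, e.g.
#
#   select session_key, bs_key, piece#, handle, size_bytes_display
#   from rc_backup_piece_details
#   order by session_key, bs_key, piece#;
#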
define rc_backup_piece_details <<< create or replace view RC_BACKUP_PIECE_DETAILS as select unique b.session_key,b.session_recid,b.session_stamp, a.*, dbms_rcvman.Num2DisplaySize(bytes) size_bytes_display from (select f.db_name, c.* from rc_backup_set a, (select rc_backup_piece.*, count(piece#) over (partition by bs_key, copy#) pieces_per_set from rc_backup_piece where status = 'A') c, (select db_key, name "DB_NAME" from rc_database) f where a.bs_key = c.bs_key and a.db_key = f.db_key and a.pieces = c.pieces_per_set) a, (select session_key, session_recid, session_stamp, recid, stamp, rsr_key, start_time, end_time, db_key, db_name from rc_rman_status) b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) >>> #define views to get backup copy details define rc_backup_copy_details <<< create or replace view RC_BACKUP_COPY_DETAILS as select a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select b.session_key, b.session_recid, b.session_stamp, a.* from ((select db_key, db_name, rsr_key, cdf_key copy_key, file#, name, tag, creation_change#, creation_time, checkpoint_change#, checkpoint_time, marked_corrupt, (blocks+1)*block_size output_bytes, completion_time, null controlfile_type, keep, keep_until, keep_options, is_recovery_dest_file from rc_datafile_copy where status='A') union (select db_key, db_name, rsr_key, ccf_key copy_key, 0, name, tag, null creation_change#, creation_time, checkpoint_change#, checkpoint_time, null, (blocks +1)*block_size output_bytes, completion_time, controlfile_type, keep, keep_until, keep_options, is_recovery_dest_file from rc_controlfile_copy where status='A')) a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time))a >>> #define views to get proxy copy details define rc_proxy_copy_details <<< create or replace view RC_PROXY_COPY_DETAILS as select a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select b.session_recid session_key, b.session_recid, b.session_stamp, a.* from (select db_key, db_name, rsr_key, xdf_key copy_key, file#, handle,comments, media,media_pool, tag, creation_change#, creation_time, checkpoint_change#, checkpoint_time, (blocks+1)*block_size output_bytes, completion_time, null controlfile_type, keep, keep_until, keep_options from rc_proxy_datafile where status = 'A' union select db_key, db_name, rsr_key, xcf_key copy_key, 0, handle,comments, media,media_pool, tag, null creation_change#, creation_time, checkpoint_change#, checkpoint_time, (blocks+1)*block_size output_bytes, completion_time, controlfile_type, keep, keep_until, keep_options from rc_proxy_controlfile where status='A') a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, 
(select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time))a >>> #define views to get proxy archivelog details define rc_proxy_archivelog_details <<< create or replace view RC_PROXY_ARCHIVELOG_DETAILS as select a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select b.rsr_key session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.recid copy_key, a.thread#, a.sequence#, a.resetlogs_change#, a.resetlogs_time, a.handle, a.media, a.media_pool, a.tag, a.first_change#, a.next_change#, a.first_time, a.next_time, (a.blocks+1)*a.block_size output_bytes, a.completion_time from rc_proxy_archivedlog a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time))a >>> #define views to get backup datafile details define rc_backup_datafile_details <<< CREATE Or REPLACE VIEW RC_BACKUP_DATAFILE_DETAILS AS select a.*, b.ts#, b.name tsname, dbms_rcvman.Num2DisplaySize(filesize) filesize_display from (select unique 'BACKUPSET' btype, b.bs_key btype_key, b.session_key, b.session_recid, b.session_stamp, b.db_key, b.db_name, a.set_stamp id1, b.set_count id2, file#, creation_change#, creation_time, resetlogs_change#, resetlogs_time, a.incremental_level, incremental_change#, checkpoint_change#, checkpoint_time, marked_corrupt, (datafile_blocks+1)*a.block_size filesize, (datafile_blocks+1)/(blocks+1) compression_ratio from rc_backup_datafile a, rc_backup_set_details b where a.bs_key = b.bs_key union select unique 'IMAGECOPY' btype, a.cdf_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.recid, a.stamp, file#, creation_change#, creation_time, resetlogs_change#, resetlogs_time, incremental_level, 0 incremental_change#, checkpoint_change#, checkpoint_time, marked_corrupt, (blocks+1)*block_size filesize, 1 compression_ratio from rc_datafile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) union select unique 'PROXYCOPY' btype, a.xdf_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.recid, a.stamp, file#, creation_change#, creation_time, resetlogs_change#, resetlogs_time, incremental_level, 0 incremental_change#, checkpoint_change#, checkpoint_time, null marked_corrupt, (blocks+1)*block_size filesize, 1 compression_ratio from rc_proxy_datafile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and 
a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time)) a, (select distinct df.db_key, df.file#, df.ts#, ts.name from rc_datafile df, rc_tablespace ts where ts.ts# = df.ts# and df.db_key=ts.db_key) b where a.file# = b.file#(+) and a.db_key=b.db_key(+) >>> #define views to get backup controlfile details define rc_backup_controlfile_details <<< CREATE Or REPLACE VIEW RC_BACKUP_CONTROLFILE_DETAILS AS select a.*, dbms_rcvman.Num2DisplaySize(filesize) filesize_display from (select unique 'BACKUPSET' btype, b.bs_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.set_stamp id1, b.set_count id2, creation_time, resetlogs_change#, resetlogs_time, checkpoint_change#, checkpoint_time, (a.blocks+1)*a.block_size filesize, 1 compression_ratio from rc_backup_controlfile a, rc_backup_set_details b where a.bs_key = b.bs_key union select unique 'IMAGECOPY' btype, a.ccf_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.recid, a.stamp, creation_time, resetlogs_change#, resetlogs_time, checkpoint_change#, checkpoint_time, (blocks+1)*block_size filesize, 1 compression_ratio from rc_controlfile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) union select unique 'PROXYCOPY' btype, a.xcf_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.recid, a.stamp, creation_time, resetlogs_change#, resetlogs_time, checkpoint_change#, checkpoint_time, (blocks+1)*block_size filesize, 1 compression_ratio from rc_proxy_controlfile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time))a >>> #define views to get backup archivelog details define rc_backup_archivelog_details <<< CREATE Or REPLACE VIEW RC_BACKUP_ARCHIVELOG_DETAILS AS select a.*, dbms_rcvman.Num2DisplaySize(filesize) filesize_display from (select unique 'BACKUPSET' btype, b.bs_key btype_key, b.session_key, b.session_recid, b.session_stamp, a.db_key, a.db_name, a.set_stamp id1, b.set_count id2, thread#, sequence#, resetlogs_change#, resetlogs_time, first_change#, first_time, next_change#, next_time, (blocks+1)*a.block_size filesize, b.compression_ratio from rc_backup_redolog a, rc_backup_set_details b where a.bs_key = b.bs_key union select unique 'PROXYCOPY', a.xal_key btype_key, session_key, session_recid, session_stamp, a.db_key, a.db_name, a.recid, a.stamp, thread#, sequence#, resetlogs_change#, resetlogs_time, first_change#, first_time, next_change#, next_time, (blocks+1)*block_size filesize, 1 from rc_proxy_archivedlog a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */
dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time))a >>> #define views to get backup spfile details define rc_backup_spfile_details <<< CREATE Or REPLACE VIEW RC_BACKUP_SPFILE_DETAILS AS select unique b.session_recid session_key, b.session_recid, b.session_stamp, b.db_key, b.db_name, b.recid bs_key, a.set_stamp, b.set_count, modification_time, a.bytes filesize, dbms_rcvman.Num2DisplaySize(a.bytes) filesize_display from rc_backup_spfile a, rc_backup_set_details b where a.bs_key = b.bs_key >>> #define views to get backup set summary define rc_backup_set_summary <<< CREATE Or REPLACE VIEW RC_BACKUP_SET_SUMMARY AS select db.name db_name, a.*, case when original_input_bytes/decode(output_bytes, 0, null, output_bytes) > 1 then original_input_bytes/decode(output_bytes, 0, null, output_bytes) else 1 end compression_ratio, dbms_rcvman.Num2DisplaySize(original_input_bytes) original_input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display , dbms_rcvman.Num2DisplaySize(original_inprate_bytes) original_inprate_bytes_display, dbms_rcvman.Num2DisplaySize(output_rate_bytes) output_rate_bytes_display from (select db_key, count(*) num_backupsets, min(start_time) oldest_backup_time, max(start_time) newest_backup_time, sum(output_bytes) output_bytes, sum(original_input_bytes) original_input_bytes, avg(original_inprate_bytes) original_inprate_bytes, avg(output_rate_bytes) output_rate_bytes from (select unique db_key, set_stamp, set_count, start_time, output_bytes, original_input_bytes, original_inprate_bytes, output_rate_bytes, compression_ratio from rc_backup_set_details) group by db_key)a, rc_database db where db.db_key = a.db_key >>> #define views to get backup datafile summary define rc_backup_datafile_summary <<< CREATE OR REPLACE VIEW RC_BACKUP_DATAFILE_SUMMARY AS select db.name db_name, a.*, case when input_bytes/decode(output_bytes, 0, null, output_bytes) > 1 then input_bytes/decode(output_bytes, 0, null, output_bytes) else 1 end compression_ratio, dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select db_key, sum(num_times_backed) num_files_backed, sum(distinct_files_backed) num_distinct_files_backed, count(distinct ts#) num_distinct_ts_backed, min(min_checkpoint_change#) min_checkpoint_change#, max(max_checkpoint_change#) max_checkpoint_change#, min(min_checkpoint_time) min_checkpoint_time, max(max_checkpoint_time) max_checkpoint_time, sum(input_bytes) input_bytes, sum(output_bytes) output_bytes from ( select a.*, b.ts#, count(distinct a.file#) over (partition by a.file#, a.creation_change#) distinct_files_backed from (select unique a.db_key, a.file#, sum(a.num_times_backed) num_times_backed, min(min_checkpoint_change#) min_checkpoint_change#, max(max_checkpoint_change#) max_checkpoint_change#, min(min_checkpoint_time) min_checkpoint_time, max(max_checkpoint_time) max_checkpoint_time, sum(input_bytes) input_bytes, sum(output_bytes) output_bytes, creation_change# from ((select a.db_key, file#,count(*) over (partition by a.db_key,file#, creation_change#) num_times_backed, min(checkpoint_change#) over (partition by a.db_key,file#, creation_change#) min_checkpoint_change#, max(checkpoint_change#) over (partition by 
a.db_key,file#, creation_change#) max_checkpoint_change#, min(checkpoint_time) over (partition by a.db_key,file#, creation_change#) min_checkpoint_time, max(checkpoint_time) over (partition by a.db_key,file#, creation_change#) max_checkpoint_time, sum((datafile_blocks+1)*a.block_size) over (partition by a.db_key,file#, creation_change#) input_bytes, sum((a.blocks+1)*a.block_size) over (partition by a.db_key,file#, creation_change#) output_bytes, creation_change# from rc_backup_datafile a, (select unique db_key, bs_key from rc_backup_set_details) b where a.bs_key = b.bs_key and a.db_key = b.db_key ) union (select unique a.db_key, file#, count(*) over (partition by a.db_key, file#, creation_change#) num_times_backed, min(checkpoint_change#) over (partition by a.db_key, file#, creation_change#) min_checkpoint_change#, max(checkpoint_change#) over (partition by a.db_key, file#, creation_change#) max_checkpoint_change#, min(checkpoint_time) over (partition by a.db_key, file#, creation_change#) min_checkpoint_time, max(checkpoint_time) over (partition by a.db_key,file#, creation_change#) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by a.db_key,file#, creation_change#) input_bytes, sum((blocks+1)*block_size) over (partition by a.db_key,file#, creation_change#) output_bytes, creation_change# from rc_datafile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key ) union (select unique a.db_key, file#, count(*) over (partition by a.db_key, file#, creation_change#) num_times_backed, min(checkpoint_change#) over (partition by a.db_key, file#, creation_change#) min_checkpoint_change#, max(checkpoint_change#) over (partition by a.db_key, file#, creation_change#) max_checkpoint_change#, min(checkpoint_time) over (partition by a.db_key, file#, creation_change#) min_checkpoint_time, max(checkpoint_time) over (partition by a.db_key, file#, creation_change#) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by a.db_key, file#, creation_change#) input_bytes, sum((blocks+1)*block_size) over (partition by a.db_key, file#, creation_change#) output_bytes, creation_change# from rc_proxy_datafile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key )) a group by a.db_key, a.file#, creation_change#) a, (select distinct df.db_key, df.file#, df.ts#, ts.name from rc_datafile df, rc_tablespace ts where ts.ts# = df.ts# and ts.db_key = df.db_key) b where a.db_key = b.db_key and a.file# = b.file#(+)) group by db_key)a, rc_database db where a.db_key = db.db_key >>> #define views to get backup controlfile summary define rc_backup_controlfile_summary <<< CREATE OR REPLACE VIEW RC_BACKUP_CONTROLFILE_SUMMARY AS select b.name db_name, a.*, case when 
input_bytes/decode(output_bytes, 0, null, output_bytes) > 1 then input_bytes/decode(output_bytes, 0, null, output_bytes) else 1 end compression_ratio, dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select db_key, sum(num_times_backed) num_files_backed, 1 num_distinct_files_backed, min(min_checkpoint_change#) min_checkpoint_change#, max(max_checkpoint_change#) max_checkpoint_change#, min(min_checkpoint_time) min_checkpoint_time, max(max_checkpoint_time) max_checkpoint_time, sum(input_bytes) input_bytes, sum(output_bytes) output_bytes from ((select a.db_key, count(*) over (partition by creation_time) num_times_backed, min(checkpoint_change#) over (partition by creation_time) min_checkpoint_change#, max(checkpoint_change#) over (partition by creation_time) max_checkpoint_change#, min(checkpoint_time) over (partition by creation_time) min_checkpoint_time, max(checkpoint_time) over (partition by creation_time) max_checkpoint_time, sum((blocks+1)*a.block_size) over (partition by creation_time) input_bytes, sum((blocks+1)*a.block_size) over (partition by creation_time) output_bytes, creation_time from rc_backup_controlfile a, (select unique db_key, bs_key from rc_backup_set_details) b where a.bs_key = b.bs_key and a.db_key = b.db_key ) union (select a.db_key, count(*) over (partition by creation_time) num_times_backed, min(checkpoint_change#) over (partition by creation_time) min_checkpoint_change#, max(checkpoint_change#) over (partition by creation_time) max_checkpoint_change#, min(checkpoint_time) over (partition by creation_time) min_checkpoint_time, max(checkpoint_time) over (partition by creation_time) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by creation_time) input_bytes, sum((blocks+1)*block_size) over (partition by creation_time) output_bytes, creation_time from rc_controlfile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key ) union (select a.db_key, count(*) over (partition by creation_time) num_times_backed, min(checkpoint_change#) over (partition by creation_time) min_checkpoint_change#, max(checkpoint_change#) over (partition by creation_time) max_checkpoint_change#, min(checkpoint_time) over (partition by creation_time) min_checkpoint_time, max(checkpoint_time) over (partition by creation_time) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by creation_time) input_bytes, sum((blocks+1)*block_size) over (partition by creation_time) output_bytes, creation_time from rc_proxy_controlfile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key ))group by db_key)a, rc_database b where a.db_key=b.db_key >>> #define views to get 
backup archivelog summary define rc_backup_archivelog_summary <<< CREATE or REPLACE VIEW RC_BACKUP_ARCHIVELOG_SUMMARY AS select db.name db_name, a.*, case when input_bytes/decode(output_bytes, 0, null, output_bytes) > 1 then input_bytes/decode(output_bytes, 0, null, output_bytes) else 1 end compression_ratio, dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select db_key, sum(num_files_backed) num_files_backed, sum(distinct_files_backed) num_distinct_files_backed, min(min_first_change#) min_first_change#, max(max_next_change#) max_next_change#, min(min_first_time) min_first_time, max(max_next_time) max_next_time, sum(original_input_bytes) input_bytes, sum(output_bytes) output_bytes from ((select a.db_key, num_files_backed, distinct_files_backed, min_first_change#, max_next_change#, min_first_time, max_next_time, original_input_bytes, output_bytes from (select a.db_key, count(*) num_files_backed, min(first_change#)min_first_change#, max(next_change#) max_next_change#, min(first_time)min_first_time, max(next_time) max_next_time from rc_backup_redolog a, rc_backup_set_details b where a.bs_key = b.bs_key and a.db_key = b.db_key group by a.db_key)a, (select db_key, count(*) distinct_files_backed from (select unique a.db_key, thread#, sequence#, resetlogs_change#, resetlogs_time from rc_backup_redolog a, rc_backup_set_details b where a.bs_key = b.bs_key and a.db_key = b.db_key) group by db_key)b, (select db_key, nvl(sum(original_input_bytes),0) original_input_bytes, nvl(sum(output_bytes), 0) output_bytes from (select unique db_key, bs_key, original_input_bytes, output_bytes from rc_backup_set_details where backup_type='L') group by db_key)c where a.db_key = b.db_key and b.db_key = c.db_key) union (select a.db_key, num_files_backed, distinct_files_backed, min_first_change#, max_next_change#, min_first_time, max_next_time, original_input_bytes, output_bytes from (select a.db_key, count(*) num_files_backed, min(first_change#)min_first_change#, max(next_change#) max_next_change#, min(first_time)min_first_time, max(next_time) max_next_time, nvl(sum((blocks+1)*block_size),0) original_input_bytes, nvl(sum((blocks+1)*block_size),0) output_bytes from rc_proxy_archivedlog a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key group by a.db_key) a, (select db_key, count(*) distinct_files_backed from (select unique a.db_key, thread#, sequence#, resetlogs_change#, resetlogs_time from rc_proxy_archivedlog a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key) group by db_key)b where a.db_key=b.db_key)) group by db_key)a, rc_database db where a.db_key=db.db_key >>> #define views to get backup spfile 
summary define rc_backup_spfile_summary <<< create or replace view RC_BACKUP_SPFILE_SUMMARY as select db.name db_name, a.*, dbms_rcvman.Num2DisplaySize(input_bytes) input_bytes_display from (select a.db_key, num_files_backed, num_distinct_files_backed, min_modification_time, max_modification_time, input_bytes from (select a.db_key, count(*) num_files_backed, min(modification_time)min_modification_time, max(modification_time) max_modification_time, sum(bytes) input_bytes from rc_backup_spfile a, rc_backup_set_details b where a.bs_key = b.bs_key and a.db_key=b.db_key group by a.db_key)a, (select db_key, count(*) num_distinct_files_backed from (select unique a.db_key, modification_time from rc_backup_spfile a, rc_backup_set_details b where a.bs_key = b.bs_key and a.db_key=b.db_key) group by db_key)b where a.db_key=b.db_key)a, rc_database db where a.db_key=db.db_key >>> #define views to get backup copy summary define rc_backup_copy_summary <<< CREATE OR REPLACE VIEW RC_BACKUP_COPY_SUMMARY AS select db.name db_name, a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select db_key, nvl(sum(num_times_backed),0) num_copies, sum(distinct_copies) num_distinct_copies, min(min_checkpoint_change#) min_checkpoint_change#, max(max_checkpoint_change#) max_checkpoint_change#, min(min_checkpoint_time) min_checkpoint_time, max(max_checkpoint_time) max_checkpoint_time, sum(output_bytes) output_bytes from (select unique a.db_key, file#, count(*) over (partition by file#, creation_change#) num_times_backed, count(distinct file#) over (partition by file#, creation_change#, checkpoint_change#) distinct_copies, min(checkpoint_change#) over (partition by file#, creation_change#) min_checkpoint_change#, max(checkpoint_change#) over (partition by file#, creation_change#) max_checkpoint_change#, min(checkpoint_time) over (partition by file#, creation_change#) min_checkpoint_time, max(checkpoint_time) over (partition by file#, creation_change#) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by file#, creation_change#) output_bytes from rc_datafile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key=b.db_key union select unique a.db_key, 0 file#, count(*) over (partition by creation_time) num_times_backed, 1 distinct_copies, min(checkpoint_change#) over (partition by creation_time) min_checkpoint_change#, max(checkpoint_change#) over (partition by creation_time) max_checkpoint_change#, min(checkpoint_time) over (partition by creation_time) min_checkpoint_time, max(checkpoint_time) over (partition by creation_time) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by creation_time) output_bytes from rc_controlfile_copy a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= 
b.end_time) and a.db_key=b.db_key) group by db_key)a, rc_database db where a.db_key=db.db_key >>> #define views to get proxy copy summary define rc_proxy_copy_summary <<< CREATE OR REPLACE VIEW RC_PROXY_COPY_SUMMARY AS select db.name db_name, a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select db_key, nvl(sum(num_times_backed),0) num_copies, sum(distinct_copies) num_distinct_copies, min(min_checkpoint_change#) min_checkpoint_change#, max(max_checkpoint_change#) max_checkpoint_change#, min(min_checkpoint_time) min_checkpoint_time, max(max_checkpoint_time) max_checkpoint_time, sum(output_bytes) output_bytes from (select unique a.db_key, file#, count(*) over (partition by file#, creation_change#) num_times_backed, count(distinct file#) over (partition by file#, creation_change#, checkpoint_change#) distinct_copies, min(checkpoint_change#) over (partition by file#, creation_change#) min_checkpoint_change#, max(checkpoint_change#) over (partition by file#, creation_change#) max_checkpoint_change#, min(checkpoint_time) over (partition by file#, creation_change#) min_checkpoint_time, max(checkpoint_time) over (partition by file#, creation_change#) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by file#, creation_change#) output_bytes from rc_proxy_datafile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key union select unique a.db_key, 0 file#, count(*) over (partition by creation_time) num_times_backed, 1 distinct_copies, min(checkpoint_change#) over (partition by creation_time) min_checkpoint_change#, max(checkpoint_change#) over (partition by creation_time) max_checkpoint_change#, min(checkpoint_time) over (partition by creation_time) min_checkpoint_time, max(checkpoint_time) over (partition by creation_time) max_checkpoint_time, sum((blocks+1)*block_size) over (partition by creation_time) output_bytes from rc_proxy_controlfile a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key = b.db_key) group by db_key) a, rc_database db where a.db_key = db.db_key >>> #define views to get proxy archivelog summary define rc_proxy_archivelog_summary <<< CREATE or REPLACE VIEW RC_PROXY_ARCHIVELOG_SUMMARY AS select db.name db_name, a.*, dbms_rcvman.Num2DisplaySize(output_bytes) output_bytes_display from (select a.db_key, nvl(num_files_backed, 0) num_files_backed, num_distinct_files_backed, min_first_change#, max_next_change#, min_first_time, max_next_time, output_bytes from (select a.db_key, count(*) num_files_backed, min(first_change#)min_first_change#, max(next_change#) max_next_change#, min(first_time)min_first_time, max(next_time) max_next_time, sum((blocks+1)*block_size) output_bytes from rc_proxy_archivedlog a, rc_rman_status b, 
(select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key=b.db_key group by a.db_key)a, (select db_key, count(*) num_distinct_files_backed from (select unique a.db_key, thread#, sequence#, resetlogs_change#, resetlogs_time from rc_proxy_archivedlog a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.status = 'A' and a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) and a.db_key=b.db_key) group by db_key)b where a.db_key=b.db_key)a, rc_database db where a.db_key=db.db_key >>> #define views to expired/unavailable backup file details define rc_unusable_backupfile_details <<< create or replace view RC_UNUSABLE_BACKUPFILE_DETAILS AS select b.session_key, a.* from (select a.db_key, f.db_name, a.rsr_key, 'BACKUPSET' btype, a.bs_key btype_key, a.set_stamp id1, a.set_count id2, 'BACKUPPIECE' filetype, a.bp_key filetype_key, a.status, a.bytes filesize, a.device_type, a.handle filename, a.media, a.media_pool from rc_backup_piece a, (select db_key, name "DB_NAME" from rc_database) f where a.status <> 'A' and a.db_key = f.db_key union select db_key, db_name, rsr_key, 'IMAGECOPY', cdf_key, null, null, 'DATAFILECOPY', cdf_key, status, (blocks+1)*block_size, 'DISK', name, null, null from rc_datafile_copy where status <> 'A' union select db_key, db_name, rsr_key, 'IMAGECOPY', ccf_key, null, null, 'CONTROLFILECOPY', ccf_key, status, (blocks+1)*block_size, 'DISK', name, null, null from rc_controlfile_copy where status <> 'A' union select db_key, db_name, rsr_key, 'PROXYCOPY', xdf_key, null, null, 'DATAFILECOPY', xdf_key, status, (blocks+1)*block_size, device_type, handle, media, media_pool from rc_proxy_datafile where status <> 'A' union select db_key, db_name, rsr_key, 'PROXYCOPY', xcf_key, null, null, 'CONTROLFILECOPY', xcf_key, status, (blocks+1)*block_size, device_type, handle, media, media_pool from rc_proxy_controlfile where status <> 'A' union select db_key, db_name, rsr_key, 'PROXYCOPY', xal_key, null, null, 'ARCHIVELOGCOPY', xal_key, status, (blocks+1)*block_size, device_type, handle, media, media_pool from rc_proxy_archivedlog where status <> 'A') a, rc_rman_status b, (select /*+ no_merge */ dbms_rcvman.sv_getsessionkey skey from dual) c, (select /*+ no_merge */ dbms_rcvman.sv_getsessionfromTimeRange fTime from dual) d, (select /*+ no_merge */ dbms_rcvman.sv_getsessionuntilTimeRange uTime from dual) e where a.rsr_key = b.rsr_key (+) and (c.skey is null or c.skey = b.session_key) and (d.fTime is null or d.fTime <= b.start_time) and (e.uTime is null or e.uTime >= b.end_time) >>> # define backup input type hierarchy view corresponding to v$rman_backup_type define rc_rman_backup_type <<< create or replace view rc_rman_backup_type as select 9 weight, 'DB FULL' input_type from dual union select 8, 'RECVR AREA' from dual union select 7, 'DB INCR' from dual union select 6, 'DATAFILE FULL' from dual 
union select 5, 'DATAFILE INCR' from dual union select 4, 'ARCHIVELOG' from dual union select 3, 'CONTROLFILE' from dual union select 2, 'SPFILE' from dual union select 1, 'BACKUPSET' from dual >>> # define view to get restore point list define rc_restore_point <<< create or replace view rc_restore_point as select dbinc_key, null recid, null stamp, site_key, rspname name, rsptime restore_point_time, creation_time creation_time, to_scn scn, 'NO' long_term, 'YES' preserved, /* All guaranteed are also preserved */ guaranteed guarantee_flashback_database from grsp union select dbinc_key, nrsp_recid recid, nrsp_stamp stamp, site_key, rspname name, rsptime restore_point_time, creation_time creation_time, to_scn scn, long_term, 'NO' preserved, 'NO' guarantee_flashback_database from nrsp >>> # define view to get standby site information define rc_site <<< create or replace view rc_site as select site_key, db_key, database_role, cf_create_time, substr(db_unique_name, 1, 30) db_unique_name from node >>> # Drop view definitions define drop_rc_rman_backup_subjob_details <<< drop view rc_rman_backup_subjob_details >>> define drop_rc_rman_backup_job_details <<< drop view rc_rman_backup_job_details >>> define drop_rc_backup_set_details <<< drop view rc_backup_set_details >>> define drop_rc_backup_piece_details <<< drop view rc_backup_piece_details >>> define drop_rc_backup_copy_details <<< drop view rc_backup_copy_details >>> define drop_rc_proxy_copy_details <<< drop view rc_proxy_copy_details >>> define drop_rc_proxy_archivelog_details <<< drop view rc_proxy_archivelog_details >>> define drop_rc_backup_datafile_details <<< drop view rc_backup_datafile_details >>> define drop_rc_backup_controlfile_details <<< drop view rc_backup_controlfile_details >>> define drop_rc_backup_archivelog_details <<< drop view rc_backup_archivelog_details >>> define drop_rc_backup_spfile_details <<< drop view rc_backup_spfile_details >>> define drop_rc_backup_set_summary <<< drop view rc_backup_set_summary >>> define drop_rc_backup_datafile_summary <<< drop view rc_backup_datafile_summary >>> define drop_rc_backup_controlfile_summary <<< drop view rc_backup_controlfile_summary >>> define drop_rc_backup_archivelog_summary <<< drop view rc_backup_archivelog_summary >>> define drop_rc_backup_spfile_summary <<< drop view rc_backup_spfile_summary >>> define drop_rc_backup_copy_summary <<< drop view rc_backup_copy_summary >>> define drop_rc_proxy_copy_summary <<< drop view rc_proxy_copy_summary >>> define drop_rc_proxy_archivelog_summary <<< drop view rc_proxy_archivelog_summary >>> define drop_rc_unusable_backupfile_details <<< drop view rc_unusable_backupfile_details >>> define drop_rc_rman_backup_type <<< drop view rc_rman_backup_type >>> define drop_rc_restore_point <<< drop view rc_restore_point >>> define drop_rc_site <<< drop view rc_site >>> # dbmsrvct.sql # # NAME # dbmsrvct.sql - recovery catalog package spec # # DESCRIPTION # This package contains procedures for maintaining # the recovery catalog. # # NOTES # This is an internal package used only by the Recovery Manager. # It is the only interface for updating the recovery catalog. # It must be created in the same database and schema as the # recovery catalog tables. # # MODIFIED (MM/DD/YY) # swerthei 06/02/98 - add media_pool # swerthei 06/01/98 - add changeProxyCopy # swerthei 05/18/98 - resync proxy copy records # dbeusee 04/20/98 - dbeusee_rpt_redundancy_enh # gpongrac 05/06/98 - add bsStatusRecalc # dbeusee 04/08/98 - xcheck enh. 
# fsanchez 03/30/98 - Duplexed Backup sets # gpongrac 09/02/97 - add setDatafileSize # gpongrac 06/30/97 - keep offline clean and read-only scn in df table # gpongrac 04/03/97 - consider offline ranges in ckptNeeded # gpongrac 03/31/97 - change to use version_time instead of cf_create_t # gpongrac 03/31/97 - add cf_create_time to offr # tpystyne 02/27/97 - add ckptNeeded # gpongrac 02/20/97 - add completion_time to bdf # swerthei 01/14/97 - add setclonename # swerthei 12/10/96 - add backup piece/set time columns # tpystyne 12/13/96 - maintain parent dbinc # tpystyne 12/12/96 - add rbs_count to checkTableSpace # gpongrac 11/20/96 - make this_dbinc_key public; # gpongrac 11/05/96 - add high_df_recid to beginCkpt # swerthei 10/23/96 - add checkDataFileCopy.completion_time # swerthei 10/23/96 - add checkArchivedLog.completion_time # tpystyne 10/05/96 - add changeControlfileCopy # tpystyne 09/12/96 - add archived parameter to checkArchivedLog # tpystyne 09/06/96 - add online redo log support # gpongrac 08/23/96 - add commit function # tpystyne 08/26/96 - add min_offr_recid # gpongrac 08/13/96 - add stored script support # tpystyne 07/24/96 - add incr_level to cdf # tpystyne 06/24/96 - implement offline range resync # tpystyne 06/22/96 - remove unused columns from ts # tpystyne 06/04/96 - add create_time to checkBackup/CopyControlFile # asurpur 04/09/96 - Dictionary Protection Implementation # tpystyne 03/28/96 - use raise_application_error # tpystyne 03/21/96 - update function prototypes # tpystyne 01/31/96 - update for set_stamp and set_count # gpongrac 01/10/96 - drop domain from registerDatabase and setDatabase # tpystyne 12/07/95 - update comments # tpystyne 12/01/95 - add deleted object resync # tpystyne 11/27/95 - split out dbms_rman package # tpystyne 10/31/95 - incorporate design changes # tpystyne 10/23/95 - add archive log procedures # gpongrac 10/11/95 - add restore and recovery procedures # tpystyne 10/09/95 - fix procedure prototypes # tpystyne 10/03/95 - fix backup set procedure prototypes # tpystyne 10/02/95 - remove rbs procedure calls # tpystyne 09/27/95 - update parameter names to match with column names # gpongrac 09/25/95 - add blocksize args to addBackupDatafile, add addB # gpongrac 09/25/95 - add procedures for backup job steps # gpongrac 09/21/95 - add creation SCN as arg to checkTs # gpongrac 08/08/95 - recovery catalog manager package spec # gpongrac 08/08/95 - Created # The dbms_rcvcat package is used by the Recovery Manager to maintain the # recovery catalog. The Recovery Manager will periodically query the # controlfile of the target database and propagate the information to # the recovery catalog using dbms_rcvcat. The Recovery Manager is the only # intended client for this package. All PL/SQL code that accesses this # package is in recover.bsq. # The errors raised by dbms_rcvcat are in the 20000 - 20999 range and are # described in rmanus.msg. Recovery Manager will display the error numbers # with text from its error message file. # The package must be created in the same schema as the recovery catalog # tables and views. # the recovery catalog can store information about many target databases. # # a target database must be registered before any information about it can # be recorded in the recovery catalog. # # registerDatabase; # # if the target database is opened using the resetlogs option, old backups # and archived logs cannot be used anymore and the database is said to # have a new incarnation.
The new incarnation must be registered and # made the current incarnation of the target database by calling resetDatabase. # # resetDatabase; # # to resync the recovery catalog with the target database the following # sequence of calls must be executed (in the given order). The procedures # will do minimal checking to enforce that calls are executed in the right order, # but it is the responsibility of the client to ensure that the protocol is # not violated. # # the resync operation is one transaction; it is either committed by endCkpt # or rolled back by cancelCkpt. The client must not directly commit or # rollback. # the full resync operation will first make a snapshot of the control file # and read all information from the snapshot. The partial resync operation # used by a backup job will read from the current controlfile, but only the # circular-reuse type records. # lockForCkpt locks the database incarnation to ensure that only one resync # can progress at a time (concurrent resyncs wouldn't make sense and # would be hard to coordinate). However, the resync operation tolerates # Recovery Manager commands that update the recovery catalog directly. # # begin # setDatabase; # lockForCkpt; # Read controlfile records and pass correct watermarks beginCkpt; # done in krmknrsn(); # needCkpt; # beginCkpt; # beginTableSpaceResync; # checkTableSpace; # ... # endTableSpaceResync; # beginDataFileResync; # checkDataFile; # ... # endDataFileResync; # beginTempFileResync; # checkTempFile; # ... # endTempFileResync; # beginThreadResync; # checkThread; # ... # endThreadResync; # if (not current controlfile) then # mine duplicate records to find approximate previous resync time; # end if; # beginLogHistoryResync; # getLogHistoryLowSCN; # checkLogHistory; # ... # endLogHistoryResync; # beginArchivedLogResync; # checkArchivedLog; # ... # endArchivedLogResync; # beginBackupSetResync; # checkBackupSet; # ... # endBackupSetResync; # beginBackupPieceResync; # checkBackupPiece; # ... # endBackupPieceResync; # beginBackupDataFileResync; # checkBackupDataFile; # ... # endBackupDataFileResync; # beginBackupSpFileResync; # checkBackupSpFile; # ... # endBackupSpFileResync; # beginBackupRedoLogResync; # checkBackupRedoLog; # ... # endBackupRedoLogResync; # beginDataFileCopyResync; # checkDataFileCopy; # ... # endDataFileCopyResync; # beginBlockCorruptionResync; # checkBlockCorruption; # ... # endBlockCorruptionResync; # endCkpt; # exception # when dbms_rcvcat.internal_error or others then # cancelCkpt; # end # the change command uses the following procedures. Each call is a transaction. # changeBackupPiece; # changeDatafileCopy; # changeArchivedLog; define dbmsrvct_sql <<< create or replace package dbms_rcvcat authid current_user is -- public constants TRUE# CONSTANT number := 1; FALSE# CONSTANT number := 0; -- Used to identify if the upgrade of catalog schema was complete UPGRADE_COMPLETED CONSTANT number := 1; -- resync types RESYNC_FULL CONSTANT number := 1; RESYNC_PARTIAL CONSTANT number := 2; RESYNC_NONE CONSTANT number := 3; CONFIGRESYNC_NO CONSTANT number := 0; CONFIGRESYNC_TORC CONSTANT number := 1; CONFIGRESYNC_TOCF CONSTANT number := 2; CONFIGRESYNC_TORC_TOCF CONSTANT number := 3; -- These constants must match the krmkct definition.
-- controlfile types CF_CURRENT CONSTANT number := 1; CF_BACKUP CONSTANT number := 2; CF_CREATED CONSTANT number := 3; CF_STANDBY CONSTANT number := 4; CF_CLONE CONSTANT number := 5; CF_NOMOUNT CONSTANT number := 6; this_db_key number := NULL; this_dbinc_key number := NULL; RESYNC_REASON_NOACTION CONSTANT number := 1; -- do not display reasons RESYNC_REASON_NONE CONSTANT number := 2; -- no reason is yet set RESYNC_REASON_DF CONSTANT number := 3; RESYNC_REASON_TF CONSTANT number := 4; RESYNC_REASON_TS CONSTANT number := 5; RESYNC_REASON_THR CONSTANT number := 6; RESYNC_REASON_ORL CONSTANT number := 7; RESYNC_REASON_CONF CONSTANT number := 8; RESYNC_REASON_CF CONSTANT number := 9; RESYNC_REASON_RSL CONSTANT number := 10; RESYNC_REASON_INC CONSTANT number := 11; RESYNC_REASON_RESET CONSTANT number := 12; resync_reason number := RESYNC_REASON_NONE; doResyncReasons boolean := FALSE; RESYNC_ACTION_ADD CONSTANT number := 1; RESYNC_ACTION_DROP CONSTANT number := 2; RESYNC_ACTION_CHANGE CONSTANT number := 3; RESYNC_ACTION_RECREATE CONSTANT number := 4; RESYNC_ACTION_RENAME CONSTANT number := 5; RESYNC_ACTION_RESIZE CONSTANT number := 6; TYPE resyncActionNames_t IS VARRAY(6) OF varchar2(12); -- Keep in sync with RESYNC_ACTION_XXXXX above RESYNC_ACTION_NAMES CONSTANT resyncActionNames_t := resyncActionNames_t('added', 'dropped', 'changed', 'recreated', 'renamed', 'resized'); TYPE resyncActionTaken_t IS VARRAY(6) OF boolean; TYPE resyncActionCounts_t IS VARRAY(6) OF number; RESYNC_OBJECT_TABLESPACE CONSTANT number := 1; RESYNC_OBJECT_DATAFILE CONSTANT number := 2; RESYNC_OBJECT_TEMPFILE CONSTANT number := 3; RESYNC_OBJECT_REDOTHREAD CONSTANT number := 4; RESYNC_OBJECT_ONLINELOG CONSTANT number := 5; TYPE resyncActionObjects_t IS VARRAY(5) OF varchar2(16); -- Keep in sync with RESYNC_OBJECT_XXXXX above RESYNC_ACTION_OBJECTS CONSTANT resyncActionObjects_t := resyncActionObjects_t('Tablespace', 'Datafile', 'Tempfile', 'Redo thread', 'Online redo log'); -- Debug levels constants RCVCAT_LEVEL_ZERO CONSTANT number := 0; RCVCAT_LEVEL_MIN CONSTANT number := 1; RCVCAT_LEVEL_LOW CONSTANT number := 5; RCVCAT_LEVEL_MID CONSTANT number := 9; RCVCAT_LEVEL_HI CONSTANT number := 12; RCVCAT_LEVEL_MAX CONSTANT number := 15; RCVCAT_LEVEL_DEFAULT CONSTANT number := RCVCAT_LEVEL_MID; TYPE fullResyncActions_t IS RECORD ( active boolean, valid boolean, lastobjno number, objtype number, actTaken resyncActionTaken_t, actCount resyncActionCounts_t ); fullResyncAction fullResyncActions_t; -- := -- fullResyncActions_t(FALSE, FALSE, -1, NULL, -- RESYNC_ACTION_TAKEN_NONE, -- RESYNC_ACTION_COUNTS_ZERO); /*-----------------------* * Debugging functions * *-----------------------*/ PROCEDURE setDebugOn(dbglevel IN NUMBER DEFAULT RCVCAT_LEVEL_DEFAULT); PROCEDURE setDebugOff; /*-----------------------* * Database Registration * *-----------------------*/ PROCEDURE registerDatabase( db_id IN number ,db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ); -- registerDatabase registers a new target database in the recovery catalog. -- All target databases must be registered exactly once. The database -- is inserted into the db table, and also one row is inserted into the dbinc -- table as the root and the current incarnation of this database. -- Note that it is legal to register a database that has undergone a previous -- resetlogs, but recovery to a time prior to the resetlogs is not supported. 
-- registerDatabase is atomic; all changes to recovery catalog are committed -- or rolled back if an exception is signalled -- Input parameters: -- db_id -- the value of kccfhdbi from the controlfile of the target database -- db_name -- the name of the database -- reset_scn -- the resetlogs SCN of this database -- reset_time -- the resetlogs time -- Exceptions: -- DUPLICATE_DATABASE -- a database with the same db_id has already been registered. This may -- happen if the database was created by copying datafiles from an -- existing database -- CHECKPOINT_IN_PROGRESS (internal) -- a recovery catalog checkpoint is in progress. The current checkpoint -- must be ended or canceled before calling registerDatabase -- BACKUP_IN_PROGRESS (internal) -- a backup conversation is in progress. The current backup conversation -- must be ended or canceled before calling registerDatabase procedure resetDatabase( db_id IN number ,db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ,parent_reset_scn IN number ,parent_reset_time IN date ); function resetDatabase( db_id IN number ,db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ,parent_reset_scn IN number ,parent_reset_time IN date ) return number; procedure resetDatabase( dbinc_key IN number ,db_name IN varchar2 ); procedure resetDatabase( dbinc_key IN number ,db_name IN varchar2 ,reset_scn OUT number ,reset_time OUT date ,db_id IN number DEFAULT NULL ); -- There are four forms of resetDatabase procedure. -- The first form registers a new incarnation of a registered database -- and makes it the current incarnation. It must always be called after -- the target database is opened with the resetlogs option. -- The second form changes the current incarnation of a registered database. -- It must be called after the target database implicitly changes -- its incarnation during flashback database. -- The third form makes an old incarnation the current incarnation. -- resetDatabase is atomic; all changes to recovery catalog are committed -- or rolled back if an exception is signalled -- The fourth one is called by RMAN versions greater than 10i, to get resetlogs -- SCN of the incarnation reset by the user. This reset_scn is used to change -- the current incarnation pointer in the mounted controlfile if the incarnation is -- already known to the controlfile. If the incarnation is not known to the mounted -- control file, the user must restore a file that belongs to the new incarnation. -- Input parameters: -- dbinc_key -- the primary key of an existing database incarnation in the recovery -- catalog. Only used by the second form. -- db_id -- the value of kccfhdbi from the controlfile of the target database -- db_name -- the name of the database -- reset_scn -- the resetlogs SCN of this database -- reset_time -- the resetlogs time -- Exceptions: -- DB_ID_IS_NULL (internal) -- a null db_id was given -- DATABASE_NOT_FOUND -- No database with the given db_id was found in the recovery catalog -- Use registerDatabase procedure instead -- RESET_SCN_TOO_LOW -- RESET_TIME_TOO_LOW -- CHECKPOINT_IN_PROGRESS (internal) -- a recovery catalog checkpoint is in progress. The current checkpoint -- must be ended or canceled before calling resetDatabase -- BACKUP_IN_PROGRESS (internal) -- a backup conversation is in progress.
The current backup conversation -- must be ended or canceled before calling resetDatabase procedure unregisterDatabase( db_key IN NUMBER DEFAULT NULL ,db_id IN NUMBER ); -- -- unregisterDatabase removes a registered database from the recovery catalog -- -- Input parameters: -- db_key -- The primary key of an existing database in the recovery catalog. This -- parameter is optional and not required because db_id uniquely -- identifies the database. Furthermore, the client (that is RMAN) does not -- know what the db_key is. The parameter is here only for -- compatibility reasons. -- db_id -- The value of kccfhdbi from the control file of the target database. -- -- Exceptions: -- CHECKPOINT_IN_PROGRESS (internal) -- Recovery catalog resync is in progress. The current resync must be -- ended or canceled before calling unregisterDatabase. -- NO_DATA_FOUND -- Database not found in the catalog. -- /*--------------------------* * Set Database Incarnation * *--------------------------*/ procedure setDatabase( db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ,db_id IN number ,db_unique_name IN varchar2 ,dummy_instance IN boolean ,cf_type IN number ,site_aware IN boolean default FALSE ); procedure setDatabase( db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ,db_id IN number ,db_unique_name IN varchar2 default NULL); procedure setDatabase(dbinc_key number); procedure setDatabase; -- setDatabase selects which target database subsequent dbms_rcvcat -- procedures operate on. Note that only the current incarnation can be -- selected. If the target database or its current incarnation is not -- registered then setDatabase will fail. -- setDatabase sets the package state variables to point to the selected -- database and its current incarnation. -- The settings will be valid until the end of the session unless setDatabase -- is called again. -- Input parameters: -- db_id -- the value of kccfhdbi from the controlfile of the target database -- if not set and successfully connected to the database, it will be set to -- the current dbid of the target database -- db_name -- the name of the database -- reset_scn -- the resetlogs SCN of this database -- reset_time -- the resetlogs time -- dummy_instance -- TRUE if the instance is a dummy that is started by RMAN -- cf_type -- type of controlfile used by the instance -- Exceptions: -- DATABASE_NOT_FOUND -- No database with the given db_id was found in the recovery catalog -- The database must be registered using registerDatabase first -- DATABASE_INCARNATION_NOT_FOUND -- No database incarnation matches the given arguments -- The database incarnation must be registered using resetDatabase first -- CHECKPOINT_IN_PROGRESS (internal) -- a recovery catalog checkpoint is in progress.
The current checkpoint -- must be ended or canceled before calling setDatabase /*-----------------------------* * Recovery Catalog Checkpoint * *-----------------------------*/ function ckptNeeded( ckp_scn IN number ,ckp_cf_seq IN number ,cf_version IN date ,cf_type IN number ,high_df_recid IN number ,high_orl_recid IN number ,high_cdf_recid IN number ,high_al_recid IN number ,high_bp_recid IN number ,high_do_recid IN number ,high_offr_recid IN number ,high_pc_recid IN number DEFAULT NULL -- for compatibility ,high_conf_recid IN number DEFAULT NULL -- for compatibility ,rltime IN DATE DEFAULT NULL -- for compatibility ,high_ts_recid IN number DEFAULT NULL -- for compatibility ,high_bs_recid IN number DEFAULT NULL -- for compatibility ,lopen_reset_scn IN number DEFAULT NULL -- for compatibility ,lopen_reset_time IN DATE DEFAULT NULL -- for compatibility ,high_ic_recid IN number DEFAULT NULL -- for compatibility ,high_tf_recid IN number DEFAULT NULL -- for compatibility ,high_rt_recid IN number DEFAULT NULL -- for compatibility ,high_grsp_recid IN number DEFAULT NULL -- for compatibility ,high_nrsp_recid IN number DEFAULT NULL -- for compatibility ,high_bcr_recid IN number DEFAULT NULL -- for compatibility ) return number; PROCEDURE lockForCkpt; procedure beginCkpt( ckp_scn IN number ,ckp_cf_seq IN number ,cf_version IN date ,ckp_time IN date ,ckp_type IN varchar2 ,ckp_db_status IN varchar2 ,high_df_recid IN number ,cf_type IN varchar2 DEFAULT 'CURRENT' -- for compatibility reasons ); -- ckptNeeded determines whether the recovery catalog is current enough -- for name translation. It is used by RMAN to check whether an implicit -- resync is needed before compiling a new backup, copy, restore or recover -- command. -- beginCkpt begins a new recovery catalog checkpoint. It records the -- checkpoint scn and controlfile sequence number of the backup control file -- used for the checkpoint. The ckp_scn must be greater than the ckp_scn of the -- previous checkpoint, or if the ckp_scns are equal then the ckp_cf_seq -- must be greater than the ckp_cf_seq of the previous checkpoint. If the -- ckp_cf_seqs are also equal, then the recovery catalog has already been resynced -- with this backup controlfile. Otherwise the backup controlfile is too old. -- -- Since the current control file doesn't have a ckp_scn, partial resyncs -- check only that ckp_cf_seq advances. -- -- if the cf_create_time has not changed since the previous checkpoint, -- we assume that the controlfile has not been recreated. If it has changed, -- the controlfile must have been recreated and all recids in the controlfile -- have effectively been reset. Therefore beginCkpt resets the highwater marks -- for all recids to 0 to force this resync to read all entries from -- the newly created controlfile. -- -- It locks the database incarnation row in the dbinc table to prevent -- multiple simultaneous checkpoints from happening. The endCkpt procedure must -- be called in the end to commit the recovery catalog checkpoint transaction. -- Alternatively cancelCkpt can be called to rollback the transaction if -- something goes wrong.
-- -- Input Parameters: -- ckp_scn -- controlfile checkpoint scn -- ckp_cf_seq -- controlfile sequence number -- cf_version -- control file creation timestamp -- ckp_time -- controlfile checkpoint timestamp -- ckp_type -- 'FULL' full resync from a snapshot control file -- 'JOB' partial resync from current control file -- high_*_recid -- High recid of corresponding controlfile records -- cf_type -- controlfile type CURRENT, STANDBY, BACKUP, CLONE etc. -- Exceptions: -- CHECKPOINT_IN_PROGRESS (internal) -- a recovery catalog checkpoint is in progress. The current checkpoint -- must be ended or canceled before beginning a new one -- DATABASE_INCARNATION_NOT_SET -- the database incarnation is not set. It must be set by calling -- setDatabase before calling beginCkpt -- INVALID_CKP_SCN -- the backup controlfile ckp_scn is less than ckp_scn of the previous -- checkpoint. This should not happen. However, it is conceivable that this -- could happen if the user restores the database from an offline backup -- and doesn't open the database with resetlogs. -- INVALID_CKP_CF_SEQ -- the backup controlfile ckp_cf_seq is less than ckp_cf_seq of the -- previous checkpoint. This should not happen. See the explanation of -- the previous exception. -- CKP_ALREADY_RECORDED -- the backup control file ckp_scn and ckp_cf_seq are equal to the previous -- checkpoint. Nothing was changed in the control file between -- this checkpoint and the previous one, so there is no reason to -- resync -- CHECKPOINT_TYPE_INVALID -- the ckp_type is not valid ('FULL' or 'JOB') procedure endCkpt; -- endCkpt must be called after all information is successfully resynced. -- Note that endCkpt cannot verify that everything was successfully resynced; -- the client must not call endCkpt if it encountered unrecoverable errors -- during the checkpoint. -- It will commit the checkpoint transaction and clear the package state -- variables to indicate that no checkpoint is in progress -- Exceptions: -- CHECKPOINT_NOT_IN_PROGRESS (internal) -- there is no checkpoint in progress to be ended. Maybe cancelCkpt was -- called before this. procedure cancelCkpt; -- cancelCkpt must be called if an unrecoverable error is encountered during -- the checkpoint. It will rollback the checkpoint transaction and clear -- the package state variables to indicate that no checkpoint is in progress /*-------------------* * Resync * *-------------------*/ -- Resyncing all non-circular-reuse type records (tablespace, datafile and -- redo thread) from the controlfile uses the following protocol. The -- controlfile maintains a high_recid for each record type. The high_recid -- is incremented every time a record of that type is created, updated or -- deleted. The client gets the high_recid for the record type -- from the controlfile and passes it to beginResync. beginResync -- compares the given high_recid with the high_recid stored in the recovery -- catalog. If the high_recids are equal then nothing has changed in the -- controlfile since the previous resync and beginResync returns false. -- If the given high_recid is greater than the one stored in the recovery -- catalog then beginResync will return true and the client will query -- all records from the controlfile and call the Check procedure for each -- record. -- Resyncing all circular-reuse type records from the controlfile uses another -- protocol.
Since the circular-reuse type records are never updated -- (significantly) after they have been created, only records created after -- the previous resync need to be resynced. The client calls the beginResync -- procedure which returns the high_recid stored in the recovery catalog. -- The client compares the returned high_recid with the high_recid in the -- controlfile. If the high_recids are equal then no new records have been created -- since the previous resync. If the high_recid in the controlfile is greater -- than the one returned by beginResync, then the client will query all -- records with recid greater than the returned high_recid from the controlfile -- and call the Check procedure for each record. -- The parameters for all Check procedures match the columns in the -- corresponding V$ views on the controlfile fixed tables. /*-------------------* * Tablespace Resync * *-------------------*/ function beginTableSpaceResync( high_ts_recid IN NUMBER, force IN BOOLEAN DEFAULT FALSE ) return boolean; -- beginTableSpaceResync should be called to start the process of resyncing -- information about tablespaces. -- Returns: -- TRUE --> tablespace resync started -- FALSE --> tablespace resync not started (since there is nothing to resync) -- Exceptions: -- CHECKPOINT_NOT_IN_PROGRESS procedure checkTableSpace( ts_name IN varchar2 ,ts# IN number ,create_scn IN number ,create_time IN date ,rbs_count IN number DEFAULT NULL ,included_in_database_backup IN varchar2 DEFAULT NULL ,bigfile IN varchar2 DEFAULT NULL ,temporary IN varchar2 DEFAULT NULL ,encrypt_in_backup IN varchar2 DEFAULT NULL ,plugin_scn IN number DEFAULT 0 ); -- checkTableSpace inserts or updates information about the given tablespace. -- It must be called once for each tablespace currently listed in -- the controlfile in ascending ts# order. If checkTableSpace notices that a -- tablespace is dropped, either because its ts# is not passed to it or -- because a new tablespace with the same ts# but higher create_scn is passed, -- it marks the tablespace dropped in the recovery catalog. -- -- Input Parameters: -- ts_name -- The name of the tablespace -- ts# -- The ktsn number of the tablespace -- create_scn -- The lowest creation SCN among all datafiles in the tablespace -- create_time -- Timestamp for create_scn -- rbs_count -- included_in_database_backup -- Flag telling if the tablespace is included in full backups. -- 'YES' or NULL - included in whole database backup -- 'NO' - not included in whole database backup -- bigfile -- Flag telling if the tablespace is bigfile or smallfile -- 'NO' or NULL - smallfile -- 'YES' - bigfile -- temporary -- Flag telling if the tablespace is temporary or not -- 'NO' or NULL - not a temporary tablespace -- 'YES' - temporary tablespace -- Exceptions: -- TS_RESYNC_NOT_STARTED -- BAD_TS_ORDER -- tablespaces were passed in the wrong order -- INVALID_CREATE_SCN -- create_scn is higher than ckp_scn or create scn is lower than the -- create scn of the previous incarnation -- INVALID_CREATE_TIME -- INVALID_TS_NAME procedure endTableSpaceResync; -- endTableSpaceResync must be called after the last call to checkTablespace. -- Exceptions: -- TS_RESYNC_NOT_STARTED /*-----------------* * Datafile Resync * *-----------------*/ function beginDataFileResync( high_df_recid IN number ) return boolean; -- beginDataFileResync must be called to start the process of resyncing -- information about datafiles. The tablespace information must be resync'd -- before doing a datafile resync.
-- Exceptions: -- TS_RESYNC_NOT_COMPLETE procedure checkDataFile(file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, stop_scn IN NUMBER, read_only IN NUMBER, stop_time IN DATE DEFAULT NULL, rfile# IN NUMBER DEFAULT NULL, aux_fname IN VARCHAR2 DEFAULT NULL, foreign_dbid IN number DEFAULT 0, foreign_create_scn IN number DEFAULT 0, foreign_create_time IN date DEFAULT NULL, plugged_readonly IN varchar2 DEFAULT 'NO', plugin_scn IN number DEFAULT 0, plugin_reset_scn IN number DEFAULT 0, plugin_reset_time IN date DEFAULT NULL, create_thread IN number DEFAULT NULL, create_size IN number DEFAULT NULL); -- checkDataFile inserts or updates information about the given datafile. -- It must be called once for each datafile currently listed in -- the controlfile in ascending file# order. checkDataFile will also mark -- datafiles which are not listed in the controlfile anymore as dropped -- in rcvcat. Note that datafiles may disappear from the controlfile if it is -- recreated. -- -- Input Parameters: -- file# -- datafile number -- fname -- the current name of the datafile -- create_scn -- the creation SCN -- create_time -- the creation timestamp -- blocks -- the current size of the datafile in blocks -- ts# -- the ktsn of the tablespace to which this datafile belongs -- aux_fname -- file name of auxname -- 'NONE' - auxname is not configured -- NULL - auxname is unknown -- Exceptions: -- DF_RESYNC_NOT_STARTED procedure endDataFileResync; -- endDataFileResync must be called after all datafiles have been passed in. -- Exceptions: -- DF_RESYNC_NOT_STARTED -- The following procedures are used to resync file names from the controlfile -- to the recovery catalog. These names will be used during restores. function beginDataFileResyncForStandby( high_df_recid IN number ) return boolean; procedure checkDataFileForStandby( file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, stop_scn IN NUMBER, read_only IN NUMBER, foreign_dbid IN NUMBER, plugin_scn IN NUMBER); -- checkDataFileForStandby inserts or updates information about the given -- datafile. It must be called once for each datafile currently listed in -- the controlfile in ascending file# order. checkDataFile will also mark -- datafiles which are not listed in the controlfile anymore as dropped -- in rcvcat. Note that datafiles may disappear from the controlfile if it is -- recreated. procedure endDataFileResyncForStandby; function beginTempFileResyncForStandby( high_tf_recid IN number ) return boolean; procedure checkTempFileForStandby (file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, autoextend IN VARCHAR2, max_size IN NUMBER, next_size IN NUMBER); procedure endTempFileResyncForStandby; procedure setDatafileSize( file# IN number ,create_scn IN number ,blocks IN number ,plugin_scn IN number DEFAULT 0 ); /*-----------------* * TempFile Resync * *-----------------*/ function tempFileToResync( high_tf_recid IN number ) return boolean; -- tempFileToResync is called to start the process of resyncing tempfile -- tablespaces before resyncing tempfiles. As the same high_ts_recid is -- used to track permanent and temporary tablespaces, we use this -- function to trigger a tablespace resync whenever a new tempfile -- is added.
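-- As an illustration only (not part of the package spec): a minimal,
-- hypothetical sketch of one tempfile resync round trip using the
-- procedures declared just below. It assumes a checkpoint (beginCkpt) is
-- already in progress and that the caller has fetched the tempfile rows
-- from the target's controlfile; every literal value is invented.
--
--   declare
--     changed boolean;
--   begin
--     changed := dbms_rcvcat.beginTempFileResync(high_tf_recid => 42);
--     if changed then
--       -- one call per tempfile, in ascending file# order
--       dbms_rcvcat.checkTempFile(
--           file#       => 1,
--           fname       => '/u02/oradata/PROD/temp01.dbf',
--           create_scn  => 100042,
--           create_time => to_date('2010-06-21', 'YYYY-MM-DD'),
--           blocks      => 12800,
--           block_size  => 8192,
--           ts#         => 3,
--           rfile#      => 1,
--           autoextend  => 'ON',
--           max_size    => 419430400,
--           next_size   => 1280);
--       dbms_rcvcat.endTempFileResync;
--     end if;
--   end;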
function beginTempFileResync( high_tf_recid IN number ) return boolean; -- beginTempFileResync must be called to start the process of resyncing -- information about tempfiles. The tablespace information must be resync'd -- before doing a tempfile resync. -- Exceptions: -- TS_RESYNC_NOT_COMPLETE procedure checkTempFile(file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, autoextend IN VARCHAR2, max_size IN NUMBER, next_size IN NUMBER); -- checkTempFile inserts or updates information about the given tempfile. -- It must be called once for each tempfile currently listed in -- the controlfile in ascending file# order. checkTempFile will also mark -- tempfiles which are not listed in the controlfile anymore as dropped -- in rcvcat. Note that tempfiles may disappear from the controlfile if it is -- recreated. -- -- Input Parameters: -- file# -- tempfile number -- fname -- the current name of the tempfile -- create_scn -- the creation SCN -- create_time -- the creation timestamp -- blocks -- the current size of the tempfile in blocks -- block_size -- the current blocksize of the tempfile -- ts# -- the ktsn of the tablespace to which this tempfile belongs -- rfile# -- tablespace relative file# -- autoextend -- ON to indicate the tempfile is auto extensible. Otherwise, OFF. -- max_size -- maximum size to which the file can extend. -- next_size -- the increment by which the file extends, up to the maximum size. -- Exceptions: -- TF_RESYNC_NOT_STARTED procedure endTempFileResync; -- endTempFileResync must be called after all tempfiles have been passed in. -- Exceptions: -- TF_RESYNC_NOT_STARTED /*---------------------* * Redo Thread resync * *---------------------*/ function beginThreadResync( high_rt_recid IN number ) return boolean; -- beginThreadResync begins the redo thread resync. procedure checkThread( thread# IN number ,last_sequence# IN number ,enable_scn IN number ,enable_time IN date ,disable_scn IN number ,disable_time IN date ,status IN varchar2 ); -- checkThread inserts or updates information about the given redo thread. -- It must be called once for each redo thread currently listed in the -- control file in ascending thread# order. -- Input Parameters: -- thread# -- thread number -- last_sequence# -- last log sequence number allocated for this thread -- enable_scn -- SCN of the last thread enable or disable -- enable_time -- timestamp of the last thread enable or disable -- status -- 'E' -> enabled, 'D' -> disabled -- Exceptions: -- RT_RESYNC_NOT_STARTED -- BAD_RT_ORDER procedure endThreadResync; -- endThreadResync must be called after all redo threads have been checked /*------------------------* * Online Redo Log resync * *------------------------*/ function beginOnlineRedoLogResync( high_orl_recid IN number ) return boolean; -- beginOnlineRedoLogResync begins the redo log resync. procedure checkOnlineRedoLog( thread# IN number ,group# IN number ,fname IN varchar2 ,bytes IN number default null ,type IN varchar2 default 'ONLINE' ); -- checkOnlineRedoLog inserts or updates information about the given redo log. -- It must be called once for each redo log currently listed in the -- control file in ascending fname order.
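-- As an illustration only: a minimal sketch of the online redo log resync
-- round trip; the high_orl_recid and log attributes below are hypothetical.
--
--   declare
--     changed boolean;
--   begin
--     changed := dbms_rcvcat.beginOnlineRedoLogResync(high_orl_recid => 9);
--     if changed then
--       -- one call per online log, in ascending fname order
--       dbms_rcvcat.checkOnlineRedoLog(
--           thread# => 1,
--           group#  => 1,
--           fname   => '/u02/oradata/PROD/redo01.log',
--           bytes   => 52428800);
--       dbms_rcvcat.endOnlineRedoLogResync;
--     end if;
--   end;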
procedure endOnlineRedoLogResync; -- endOnlineRedoLogResync must be called after all logs have been checked /*---------------------------------* * Guaranteed Restore Point Resync * *---------------------------------*/ function beginGuaranteedRPResync( high_grsp_recid IN number ) return boolean; -- beginGuaranteedRPResync begins the restore point resync PROCEDURE checkGuaranteedRP( rspname IN VARCHAR2 ,from_scn IN NUMBER ,to_scn IN NUMBER ,resetlogs_change# IN NUMBER ,resetlogs_time IN DATE ,create_time IN DATE DEFAULT NULL ,rsp_time IN DATE DEFAULT NULL ,guaranteed IN VARCHAR2 DEFAULT 'YES' ); -- checkGuaranteedRP inserts or updates information about the -- given restore point. It must be called once for each restore point currently -- listed in the control file in ascending name order. procedure endGuaranteedRPResync; -- endGuaranteedRPResync must be called after all restore points -- have been checked /*----------------------------------* * RMAN Configuration records resync * *----------------------------------*/ function beginConfigResync( high_conf_recid IN number ) return number; function beginConfigResync2( high_conf_recid IN number ) return number; -- beginConfigResync2 begins resync of RMAN Configuration records. -- beginConfigResync is used only by 9i client. procedure endConfigResync; procedure endConfigResync2(sync_to_cf_pending IN boolean DEFAULT FALSE); -- endConfigResync2 must be called after all RMAN Configuration records -- have been checked. endConfigResync is used only by 9i client. procedure getConfig( conf# OUT number ,name IN OUT varchar2 ,value IN OUT varchar2 ,first IN boolean); -- this getConfig is called during resync from RC to CF. This is a duplicate -- of the function from the dbms_rcvman PL/SQL package /*-----------------------------* * Redo Log History resync * *-----------------------------*/ function beginLogHistoryResync return number; -- beginLogHistoryResync begins the log history resync. It returns the -- highest recid of the log history entries recorded in the recovery catalog. -- The purpose of the returned recid is to avoid processing log history -- entries that are already in rcvcat. function getLogHistoryLowSCN return number; -- getLogHistoryLowSCN returns the highest low_scn that is known to the -- recovery catalog for the current incarnation. Using this, we skip resyncing -- log history records that are less than low_scn when the controlfile -- is not current. procedure checkLogHistory( rlh_recid IN number ,rlh_stamp IN number ,thread# IN number ,sequence# IN number ,low_scn IN number ,low_time IN date ,next_scn IN number ,reset_scn IN number default NULL ,reset_time IN date default NULL ); -- checkLogHistory inserts or checks the information about the given log history -- entry. If the entry doesn't exist in rcvcat then it is inserted. If the entry -- already exists then it is compared with the given entry. If there -- is a mismatch, an internal error is signalled (the entries shouldn't change) -- checkLogHistory is called once for every redo log history entry in the -- controlfile with a recid greater than or equal to the one returned by -- beginLogHistoryResync. The idea is to check the last entry inserted during the -- previous checkpoint. -- Input Parameters: -- rlh_recid -- The controlfile recid of the redo log history entry -- rlh_stamp -- The controlfile stamp of the redo log history entry -- thread# -- Thread number. -- sequence# -- Log sequence number -- low_scn -- The low SCN of this log. -- low_time -- Timestamp associated with lowScn.
-- next_scn -- The last SCN of this log. All redo at this SCN or higher -- is in a subsequent log sequence number. procedure endLogHistoryResync; -- endLogHistoryResync will complete redo log history resync and update the -- high_rlh_recid column in the dbinc table. /*-------------------------* * Archived Log resync * *-------------------------*/ function beginArchivedLogResync return number; -- beginArchivedLogResync begins the archived log resync procedure checkArchivedLog( al_recid IN number ,al_stamp IN number ,thread# IN number ,sequence# IN number ,reset_scn IN number ,reset_time IN date ,low_scn IN number ,low_time IN date ,next_scn IN number ,next_time IN date ,blocks IN number ,block_size IN number ,fname IN varchar2 ,archived IN varchar2 ,completion_time IN date ,status IN varchar2 ,is_standby IN varchar2 DEFAULT NULL ,dictionary_begin IN varchar2 DEFAULT NULL ,dictionary_end IN varchar2 DEFAULT NULL ,is_recovery_dest_file IN varchar2 default 'NO' ,compressed IN varchar2 default 'NO' ,creator IN varchar2 default NULL ,terminal IN varchar2 default 'NO' ); -- checkArchivedLog inserts the archived log entry in rcvcat. If the entry is -- already in rcvcat it will check that it matches the given entry. -- Input Parameters: -- al_recid -- The controlfile recid of the archived log entry -- al_stamp -- The controlfile stamp of the archived log entry -- thread# -- Thread number. -- sequence# -- Log sequence number -- reset_scn -- The resetlogs SCN. -- reset_time -- Timestamp associated with resetScn. -- low_scn -- The low SCN of this log. -- low_time -- Timestamp associated with lowScn. -- next_scn -- The last SCN of this log. All redo at this SCN or higher -- is in a subsequent log sequence number. -- next_time -- Timestamp associated with nextScn. -- blocks -- Number of blocks in this archivelog. -- block_size -- Blocksize of the log. -- fname -- Filename of the archived log. NULL -> log was archived -- status -- 'A' -> log is available, 'U' -> log is unavailable -- is_standby -- 'Y' -> this is a standby archive log, -- 'N' -> this is a primary archive log -- is_recovery_dest_file -- YES - is a recovery destination file. Otherwise, NO. -- compressed -- whether the piece is compressed. procedure endArchivedLogResync; -- endArchivedLogResync completes archived log resync and updates the -- high_al_recid column in the dbinc table for the next resync. /*-------------------------* * Offline range resync * *-------------------------*/ function beginOfflineRangeResync return number; -- beginOfflineRangeResync begins the offline range resync. It returns -- the highest offline range recid that was recorded by the previous resync. procedure checkOfflineRange( offr_recid IN number ,offr_stamp IN number ,file# IN number ,create_scn IN number ,offline_scn IN number ,online_scn IN number ,online_time IN date ,cf_create_time IN date ,reset_scn IN number default NULL ,reset_time IN date default NULL ); -- checkOfflineRange inserts an offline range record in the rcvcat. -- Input Parameters: -- offr_recid -- recid of the offline range record in control file -- offr_stamp -- stamp of the offline range record in control file procedure endOfflineRangeResync; -- endOfflineRangeResync completes the offline range resync. /*-------------------------* * Backup Set resync * *-------------------------*/ function beginBackupSetResync return number; -- beginBackupSetResync begins the backup set resync. It returns the highest -- backup set recid that was recorded by the previous resync.
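-- As an illustration only: a minimal sketch of the circular-reuse protocol
-- applied to backup set records, using checkBackupSet (declared just below);
-- all recid, stamp and time values are hypothetical. Only controlfile
-- records with a recid greater than the value returned by
-- beginBackupSetResync would be passed in.
--
--   declare
--     prev_high number;
--   begin
--     prev_high := dbms_rcvcat.beginBackupSetResync;
--     dbms_rcvcat.checkBackupSet(
--         bs_recid        => prev_high + 1,
--         bs_stamp        => 752846312,
--         set_stamp       => 752846300,
--         set_count       => 15,
--         bck_type        => 'D',
--         pieces          => 1,
--         start_time      => sysdate - 1/24,
--         completion_time => sysdate);
--     dbms_rcvcat.endBackupSetResync;
--   end;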
procedure checkBackupSet( bs_recid IN number ,bs_stamp IN number ,set_stamp IN number ,set_count IN number ,bck_type IN varchar2 ,incr_level IN number DEFAULT NULL ,pieces IN number ,start_time IN date ,completion_time IN date ,controlfile_included IN VARCHAR2 DEFAULT NULL ,input_file_scan_only IN VARCHAR2 DEFAULT NULL ,keep_options IN number DEFAULT 0 ,keep_until IN date DEFAULT NULL ,block_size IN number DEFAULT NULL ,multi_section IN varchar2 DEFAULT NULL ); -- checkBackupSet inserts a backup set record in the rcvcat. If the record is -- already in rcvcat the completion_time will be checked. -- Input Parameters: -- bck_type -- Type of backup: 'D' = full datafile, 'I' = incremental datafile, -- 'L' = archivelog -- incr_level -- Level of the incremental backup -- bs_recid -- recid of the backup set record in control file -- bs_stamp -- stamp of the backup set record in control file -- completion_time -- time when the backup completed -- ctrlf_ckp_scn -- If the backup set contains a control file, this is the checkpoint SCN -- in the control file backup. Null otherwise -- controlfile_included -- Indicates if this backup set has a controlfile in it and its type -- 'YES' or 'SBY'. If there is no controlfile in the backup set then -- it is 'NO'. -- keep_until --- Keep until some time. -- keep_options --- Keep options (0 means no_keep). -- block_size -- block_size for backupset procedure endBackupSetResync; -- endBackupSetResync completes the backup set resync. /*-------------------------* * Backup piece resync * *-------------------------*/ function beginBackupPieceResync return number; procedure checkBackupPiece( bp_recid IN number ,bp_stamp IN number ,set_stamp IN number ,set_count IN number ,piece# IN number ,tag IN varchar2 ,device_type IN varchar2 ,handle IN varchar2 ,comments IN varchar2 ,media IN varchar2 ,concur IN varchar2 ,start_time IN date ,completion_time IN date ,status IN varchar2 ,copy# IN number default 1 ,media_pool IN number default 0 ,bytes IN number default NULL ,is_recovery_dest_file IN varchar2 default 'NO' ,rsr_recid IN number default NULL ,rsr_stamp IN number default NULL ,compressed IN varchar2 default 'NO' ,encrypted IN varchar2 default 'NO' ,backed_by_osb IN varchar2 default 'NO' ); -- Input Parameters: -- -- bp_recid -- recid of backup piece record in control file -- bp_stamp -- stamp of backup piece record in control file -- piece# -- The ordinal number of this piece within the set. -- tag -- User-specified tag. May be null. -- device_type -- Sequential device type on which this backup set resides. 'DISK' -- specified if on disk rather than sequential media. -- handle -- The media handle on which this backup piece is stored. -- comments -- The comment associated with the backup piece. -- media -- The media id. If multiple backup pieces reside on the same physical -- piece of media, then they have the same mediaId. Note this is not -- the same as a handle, as the handle is specific to a single piece. -- concur -- TRUE if multiple job steps can access the physical media concurrently, -- FALSE if not. -- bytes -- piece size in bytes. If unknown, then null is sent. For example, -- pre-10i backup pieces don't have a bytes value and inspecting a tape -- backup doesn't populate the bytes value. -- is_recovery_dest_file -- YES - is a recovery destination file. Otherwise, NO. -- compressed -- whether the piece is compressed.
procedure endBackupPieceResync; /*-------------------------* * Backup Datafile resync * *-------------------------*/ function beginBackupDataFileResync return number; procedure checkBackupDataFile( bdf_recid IN number ,bdf_stamp IN number ,set_stamp IN number ,set_count IN number ,file# IN number ,create_scn IN number ,create_time IN date ,reset_scn IN number ,reset_time IN date ,incr_level IN number ,incr_scn IN number ,ckp_scn IN number ,ckp_time IN date ,abs_fuzzy_scn IN number ,datafile_blocks IN number ,blocks IN number ,block_size IN number ,min_offr_recid IN number ,completion_time IN date ,controlfile_type IN varchar2 DEFAULT NULL ,cfile_abck_year IN number DEFAULT NULL ,cfile_abck_mon_day IN number DEFAULT NULL ,cfile_abck_seq IN number DEFAULT NULL ,chk_last_recid IN boolean DEFAULT TRUE ,blocks_read IN number DEFAULT NULL ,used_chg_track IN varchar2 DEFAULT 'NO' ,used_optim IN varchar2 DEFAULT 'NO' ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ,section_size IN number DEFAULT NULL ); -- Input Parameters: -- recid -- This is the recid of the v$backup_datafile record for this backup. -- stamp -- The timestamp associated with the recid in the controlfile. -- file# -- Datafile number -- create_scn -- The creation SCN of the datafile. -- reset_scn -- The resetlogs SCN of the datafile. Note that different datafiles -- in a backup set may have different resetScn's. This occurs when -- taking a backup of a file that was off-line clean or read-only when -- a resetlogs was done. -- reset_time -- incr_scn -- ckp_scn -- The SCN to which the datafile's checkpoint will be advanced if this -- backup is restored ('F') or applied ('I'). -- ckp_time -- The timestamp associated with ckp_scn. -- abs_fuzzy_scn -- Absolute fuzzy SCN. -- If zero then the file has no fuzziness at all. -- blocks -- Number of blocks written to the backup set for this datafile, or -- the number of blocks in the datafile copy. -- block_size -- The blocksize of the datafile or datafile copy. All files within -- a backup set have the same blocksize, but this need not be enforced -- by this package. -- controlfile_type -- If this is a controlfile, it indicates its type ('B' or 'S'), null -- otherwise -- chk_last_recid -- Crosscheck with catalog if this recid is below the previous -- high water mark. procedure endBackupDataFileResync; /*-------------------------* * Backup SPFILE resync * *-------------------------*/ function beginBackupSpFileResync return number; procedure checkBackupSpFile( bsf_recid IN number ,bsf_stamp IN number ,set_stamp IN number ,set_count IN number ,modification_time IN date ,bytes IN number ,chk_last_recid IN boolean DEFAULT TRUE ,db_unique_name IN varchar2 DEFAULT NULL ); -- Input Parameters: -- bsf_recid -- This is the recid of the v$backup_spfile record for this backup. -- bsf_stamp -- The timestamp associated with the recid in the controlfile. -- set_stamp -- The timestamp associated with the parent backup set. -- set_count -- The set_count of the parent backup set. -- modification_time -- The modification time of this SPFILE. -- bytes -- Number of bytes written to the backup set for this SPFILE. -- chk_last_recid -- Crosscheck with catalog if this recid is below the previous -- high water mark.
procedure endBackupSpFileResync; /*-------------------------* * Backup Redo Log resync * *-------------------------*/ function beginBackupRedoLogResync return number; procedure checkBackupRedoLog( brl_recid IN number ,brl_stamp IN number ,set_stamp IN number ,set_count IN number ,thread# IN number ,sequence# IN number ,reset_scn IN number ,reset_time IN date ,low_scn IN number ,low_time IN date ,next_scn IN number ,next_time IN date ,blocks IN number ,block_size IN number ,chk_last_recid IN boolean DEFAULT TRUE ,terminal IN varchar2 DEFAULT 'NO' ); -- Input Parameters: -- recid -- The controlfile recid of the archivelog entry. Null if for a backup -- set. -- stamp -- Timestamp associated with copyRecid. -- thread# -- Thread number. -- sequence# -- Log sequence number -- reset_scn -- The resetlogs SCN. -- reset_time -- Timestamp associated with resetScn. -- low_scn -- The low SCN of this log. -- low_time -- Timestamp associated with lowScn. -- next_scn -- The last SCN of this log. All redo at this SCN or higher -- is in a subsequent log sequence number. -- next_time -- Timestamp associated with nextScn. -- blocks -- Number of blocks in this archivelog. -- block_size -- Blocksize of the log. -- chk_last_recid -- Crosscheck with catalog if this recid is below the previous -- high water mark. procedure endBackupRedoLogResync; /*----------------------------* * Copy Datafile resync * *----------------------------*/ function beginDataFileCopyResync return number; procedure checkDataFileCopy( cdf_recid IN number ,cdf_stamp IN number ,fname IN varchar2 ,tag IN varchar2 ,file# IN number ,create_scn IN number ,create_time IN date ,reset_scn IN number ,reset_time IN date ,incr_level IN number ,ckp_scn IN number ,ckp_time IN date ,onl_fuzzy IN varchar2 ,bck_fuzzy IN varchar2 ,abs_fuzzy_scn IN number ,rcv_fuzzy_scn IN number ,rcv_fuzzy_time IN date ,blocks IN number ,block_size IN number ,min_offr_recid IN number ,completion_time IN date ,status IN varchar2 ,controlfile_type IN varchar2 DEFAULT NULL ,keep_options IN number DEFAULT 0 ,keep_until IN date DEFAULT NULL ,scanned IN varchar2 DEFAULT 'NO' ,is_recovery_dest_file IN varchar2 DEFAULT 'NO' ,rsr_recid IN number DEFAULT NULL ,rsr_stamp IN number DEFAULT NULL ,marked_corrupt IN number DEFAULT NULL ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ); -- Input Parameters: -- recid -- If this is a datafile copy, this is the recid of the controlfile -- record for this copy. -- stamp -- The timestamp associated with the recid in the controlfile. -- fname -- The filename of the copy file. -- tag -- Optional tag for a datafile copy -- file# -- Datafile number -- create_scn -- The creation SCN of the datafile. -- reset_scn -- The resetlogs SCN of the datafile. Note that different datafiles -- in a backup set may have different resetScn's. This occurs when -- taking a backup of a file that was off-line clean or read-only when -- a resetlogs was done. -- reset_time -- ckp_scn -- The SCN in the file's header. -- ckp_time -- The timestamp associated with ckp_scn. -- abs_fuzzy_scn -- Absolute fuzzy SCN. -- If 0, then the file is not fuzzy at all. -- blocks -- File size. -- block_size -- Number of bytes in each logical block. -- controlfile_type -- The type of controlfile copy: 'B' or 'S'. -- null if not a controlfile -- keep_until --- Keep until some time. -- keep_options --- Keep options (0 means no_keep).
-- is_recovery_dest_file -- YES - is a recovery destination file. Otherwise, NO. procedure endDataFileCopyResync; /*----------------------------* * Corrupt Block resync * *----------------------------*/ function beginBackupCorruptionResync return number; procedure checkBackupCorruption( bcb_recid IN number ,bcb_stamp IN number ,set_stamp IN number ,set_count IN number ,piece# IN number ,file# IN number ,block# IN number ,blocks IN number ,corrupt_scn IN number ,marked_corrupt IN varchar2 ,corruption_type IN varchar2 default NULL ); procedure endBackupCorruptionResync; function beginCopyCorruptionResync return number; procedure checkCopyCorruption( ccb_recid IN number ,ccb_stamp IN number ,cdf_recid IN number ,cdf_stamp IN number ,file# IN number ,block# IN number ,blocks IN number ,corrupt_scn IN number ,marked_corrupt IN varchar2 ,corruption_type IN varchar2 default NULL ); procedure endCopyCorruptionResync; -- addCorrupt is called to add information about a block corruption in a -- datafile copy or backup datafile. -- -- Input Parameters: -- bdf_key -- Primary key of the bdf record which was returned by addBackupDatafile. -- block# -- The block number of the corrupt block. -- corrupt_scn -- SCN of the redo that corrupted the block for logically corrupt blocks. -- Zero if the block is media corrupt. -- marked_corrupt -- 'T' if we could not read the block from disk and had to reformat -- the block as corrupt for storing in the backup set or copy. 'F' -- otherwise. -- piece# -- The backup piece where the corrupt block was written. Null if -- this is for a datafile copy. /*----------------------------* * Deleted Object resync * *----------------------------*/ function beginDeletedObjectResync return number; procedure checkDeletedObject( do_recid IN number ,do_stamp IN number ,object_type IN varchar2 ,object_recid IN number ,object_stamp IN number ,object_data IN number DEFAULT NULL ,object_fname IN varchar2 DEFAULT NULL ,object_create_scn IN number DEFAULT NULL ,set_stamp IN number DEFAULT NULL ,set_count IN number DEFAULT NULL ); procedure endDeletedObjectResync; /*-------------------* * Proxy Copy resync * *-------------------*/ function beginProxyResync return number; procedure checkProxyDataFile( xdf_recid IN number ,xdf_stamp IN number ,tag IN varchar2 ,file# IN number ,create_scn IN number ,create_time IN date ,reset_scn IN number ,reset_time IN date ,incr_level IN number ,ckp_scn IN number ,ckp_time IN date ,onl_fuzzy IN varchar2 ,bck_fuzzy IN varchar2 ,abs_fuzzy_scn IN number ,rcv_fuzzy_scn IN number ,rcv_fuzzy_time IN date ,blocks IN number ,block_size IN number ,min_offr_recid IN number ,device_type IN varchar2 ,handle IN varchar2 ,comments IN varchar2 ,media IN varchar2 ,media_pool IN number ,start_time IN date ,completion_time IN date ,status IN varchar2 ,controlfile_type IN varchar2 DEFAULT NULL ,keep_options IN number DEFAULT 0 ,keep_until IN date DEFAULT NULL ,rsr_recid IN number DEFAULT NULL ,rsr_stamp IN number DEFAULT NULL ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ); PROCEDURE checkProxyArchivedLog( xal_recid IN NUMBER ,xal_stamp IN NUMBER ,tag IN VARCHAR2 ,thread# IN NUMBER ,sequence# IN NUMBER ,resetlogs_change# IN NUMBER ,resetlogs_time IN DATE ,first_change# IN NUMBER ,first_time IN DATE ,next_change# IN NUMBER ,next_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN 
VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN DATE ,completion_time IN DATE ,status IN VARCHAR2 ,rsr_recid IN NUMBER ,rsr_stamp IN NUMBER ,terminal IN VARCHAR2 default 'NO' ,keep_until IN DATE default NULL ,keep_options IN NUMBER default 0 ); -- Input Parameters: -- recid -- If this is a datafile/archivelog copy, this is the recid of the -- controlfile record for this copy. -- stamp -- The timestamp associated with the recid in the controlfile. -- fname -- The filename of the copy file. -- tag -- Optional tag for a datafile copy -- file# -- Datafile number -- thread# -- Thread number -- sequence# -- Log sequence number -- create_scn -- The creation SCN of the datafile. -- reset_scn, resetlogs_change# -- The resetlogs SCN of the datafile. Note that different datafiles -- in a backup set may have different resetScn's. This occurs when -- taking a backup of a file that was off-line clean or read-only when -- a resetlogs was done. -- reset_time, resetlogs_time -- Timestamp associated with reset_scn -- ckp_scn -- The SCN in the file's header. -- ckp_time -- The timestamp associated with ckp_scn. -- abs_fuzzy_scn -- Absolute fuzzy SCN. -- If 0, then the file is not fuzzy at all. -- blocks -- File size. -- block_size -- Number of bytes in each logical block. -- handle -- The media handle on which this proxy backup is stored. -- comments -- The comment associated with the proxy backup. -- media -- The media id. If multiple proxy copies reside on the same physical -- piece of media, then they have the same mediaId. Note this is not -- the same as a handle, as the handle is specific to a single piece. -- start_time -- The time that the media management layer was told it could start -- copying the file. -- completion_time -- The time that the media management layer reported that it was done -- copying the file. -- status -- A or D - available or deleted -- controlfile_type -- The type of controlfile copy: 'B' or 'S'. -- null if not a controlfile -- keep_until -- Keep until some time. -- keep_options -- Keep options (0 means no_keep). -- first_change# -- SCN generated when switching in -- first_time -- Timestamp associated with first_change# -- next_change# -- SCN generated when switching out -- next_time -- Timestamp associated with next_change# procedure endProxyResync; /*-------------------------* * Incarnation resync * *-------------------------*/ FUNCTION beginIncarnationResync(return_Recid in boolean DEFAULT FALSE) return number; -- beginIncarnationResync begins the database incarnation tree resync. It -- returns the controlfile timestamp seen by the previous resync, if return_Recid -- is FALSE. Otherwise it returns the last recid of the incarnation record resynced -- from the same controlfile (i.e., treated like other circular record sections). function checkIncarnation(reset_scn IN NUMBER, reset_time IN DATE, prior_reset_scn IN NUMBER DEFAULT NULL, prior_reset_time IN DATE DEFAULT NULL, db_name IN VARCHAR2 DEFAULT 'UNKNOWN') return number; -- checkIncarnation checks if the (reset_scn, reset_time) incarnation is present -- in the dbinc table. If not, it inserts this entry into the dbinc table. Returns the -- corresponding dbinc_key entry. procedure endIncarnationResync(high_kccdivts IN NUMBER, high_ic_recid IN NUMBER DEFAULT 0); -- endIncarnationResync records this controlfile timestamp / high recid -- depending on whether the resync is based on controlfile stamps or recids.
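-- As an illustration only: a minimal sketch of the stamp-based incarnation
-- resync using the three procedures above; the SCN, date and controlfile
-- timestamp values are hypothetical.
--
--   declare
--     prev_ts   number;
--     dbinc_key number;
--   begin
--     prev_ts := dbms_rcvcat.beginIncarnationResync;  -- stamp-based variant
--     -- pass each incarnation record not seen by the previous resync
--     dbinc_key := dbms_rcvcat.checkIncarnation(
--         reset_scn  => 1594143,
--         reset_time => to_date('2010-06-21 11:02:15',
--                               'YYYY-MM-DD HH24:MI:SS'),
--         db_name    => 'PROD');
--     dbms_rcvcat.endIncarnationResync(high_kccdivts => 752846400);
--   end;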
/*-----------------------------*
 * Normal restore point Resync *
 *-----------------------------*/

FUNCTION beginRestorePointResync RETURN NUMBER;

PROCEDURE checkRestorePoint(
       nrsp_recid  IN NUMBER
      ,nrsp_stamp  IN NUMBER
      ,nrsp_name   IN VARCHAR2
      ,reset_scn   IN NUMBER
      ,reset_time  IN DATE
      ,to_scn      IN NUMBER
      ,nrsp_time   IN DATE
      ,create_time IN DATE
      ,deleted     IN NUMBER
      );

PROCEDURE endRestorePointResync(lowrecid IN number);

/*----------------------------*
 * RMAN Status resync         *
 *----------------------------*/

FUNCTION beginRmanStatusResync RETURN NUMBER;
-- beginRmanStatusResync begins the resync of the RMAN status records. It
-- returns the controlfile timestamp seen by the previous resync.

PROCEDURE checkRmanStatus(
       recid            IN NUMBER
      ,stamp            IN NUMBER
      ,parent_recid     IN NUMBER
      ,parent_stamp     IN NUMBER
      ,row_level        IN NUMBER
      ,row_type         IN VARCHAR2
      ,command_id       IN VARCHAR2
      ,operation        IN VARCHAR2
      ,status           IN VARCHAR2
      ,mbytes_processed IN NUMBER
      ,start_time       IN DATE
      ,end_time         IN DATE
      ,ibytes           IN NUMBER   default null
      ,obytes           IN NUMBER   default null
      ,optimized        IN VARCHAR2 default null
      ,otype            IN VARCHAR2 default null
      ,session_recid    IN NUMBER   default null
      ,session_stamp    IN NUMBER   default null
      ,odevtype         IN VARCHAR2 default null
      ,osb_allocated    IN VARCHAR2 default 'NO');
--
-- checkRmanStatus checks if this RMAN status row is present in the rsr table.
-- If it is not present, then it inserts this row into the table.
--
-- Input Parameters:
--   recid
--     Recid of the rman status record in the controlfile.
--   stamp
--     The timestamp associated with the recid in the controlfile.
--   parent_recid
--     The recid of the parent record. Might be null.
--   parent_stamp
--     The timestamp associated with the parent_recid in the controlfile.
--     Might be null.
--   row_level
--     Row level.
--   command_id
--     Command id of the row.
--   operation
--     Operation associated with this row.
--   status
--     Status of the operation.
--   mbytes_processed
--     Amount of data processed by the operation.
--   start_time
--     The time when the operation begins.
--   end_time
--     The time when the operation ends. Might be null.
--   ibytes
--     The amount of input data in bytes processed.
--   obytes
--     The amount of output data written in bytes during the command.

PROCEDURE endRmanStatusResync(recid number);
-- endRmanStatusResync records the input recid as high_rsr_recid.

PROCEDURE updateRmanStatusRow(
       recid  IN number
      ,stamp  IN number
      ,mbytes IN number
      ,status IN binary_integer);
-- updateRmanStatusRow updates the row with status and mbytes.

/*----------------------------*
 * RMAN Output resync         *
 *----------------------------*/

FUNCTION beginRmanOutputResync(start_timestamp in NUMBER) RETURN NUMBER;
-- beginRmanOutputResync begins the resync of the RMAN output records. It
-- returns the timestamp seen by the previous resync of RMAN output rows.

PROCEDURE checkRmanOutput(
       recid             IN NUMBER
      ,stamp             IN NUMBER
      ,session_recid     IN NUMBER
      ,session_stamp     IN NUMBER
      ,rman_status_recid IN NUMBER
      ,rman_status_stamp IN NUMBER
      ,output            IN VARCHAR2);
--
-- checkRmanOutput checks if this RMAN Output row is present in the rout
-- table. If it is not present, then it inserts this row into the table.
--
-- Input Parameters:
--   stamp
--     The timestamp associated with the recid in the memory.
--   session_recid
--     The recid of the session record indicator.
--   session_stamp
--     The timestamp associated with the session record indicator.
--   rman_status_recid
--     recid of the control file record corresponding to the command.
--   rman_status_stamp
--     stamp of the control file record corresponding to the command.
--   output
--     text output from the command execution.

PROCEDURE endRmanOutputResync;
-- endRmanOutputResync records the input stamp as high_rout_stamp.

/*----------------------------*
 * Block Corruption Resync    *
 *----------------------------*/

function beginBlockCorruptionResync(
       low_bcr_recid IN number) return number;

procedure checkBlockCorruption(
       bcr_recid       IN number
      ,bcr_stamp       IN number
      ,file#           IN number
      ,create_scn      IN number
      ,create_time     IN date
      ,block#          IN number
      ,blocks          IN number
      ,corrupt_scn     IN number
      ,corruption_type IN varchar2
      );

procedure endBlockCorruptionResync;
--
-- Input Parameters:
--   bcr_recid and bcr_stamp
--     X$KCCBLKCOR recid and stamp.
--   file#, create_scn and create_time
--     Datafile number, creation scn and creation time to which this
--     corrupt block range belongs.
--   block#
--     The block number of the corrupt block.
--   blocks
--     Number of blocks corrupted from block#.
--   corrupt_scn
--     SCN of the redo that corrupted the block for logically corrupt blocks.
--     Zero if the block is media corrupt.
--   corruption_type
--     One of the following types: ALL ZERO, FRACTURED, CHECKSUM, CORRUPT
--

/*----------------------------*
 * Change Procedures          *
 *----------------------------*/

procedure changeDataFileCopy(
       cdf_recid    IN number
      ,cdf_stamp    IN number
      ,status       IN varchar2
      ,keep_options IN number DEFAULT NULL  -- null means don't update
      ,keep_until   IN date   DEFAULT NULL
      ,osite_key    IN number DEFAULT NULL
      ,nsite_key    IN number DEFAULT NULL
      );
-- changeDataFileCopy is called to change the availability of, delete, or
-- change the keep options of a datafile copy in the recovery catalog.

procedure changeControlfileCopy(
       cdf_recid    IN number
      ,cdf_stamp    IN number
      ,status       IN varchar2
      ,keep_options IN number DEFAULT NULL  -- null means don't update
      ,keep_until   IN date   DEFAULT NULL
      ,osite_key    IN number DEFAULT NULL
      ,nsite_key    IN number DEFAULT NULL
      );
-- changeControlfileCopy is called to change the availability or delete a
-- controlfile copy in the recovery catalog.

procedure changeArchivedLog(
       al_recid  IN number
      ,al_stamp  IN number
      ,status    IN varchar2
      ,osite_key IN number DEFAULT NULL
      ,nsite_key IN number DEFAULT NULL
      );
-- changeArchivedLog is called to change the availability or delete an
-- archived log in the recovery catalog.

procedure changeBackupSet(
       recid        IN number
      ,stamp        IN number
      ,keep_options IN number  -- null means don't update
      ,keep_until   IN date
      ,osite_key    IN number DEFAULT NULL
      ,nsite_key    IN number DEFAULT NULL
      );
-- changeBackupSet is called to change the keep options of a backup set
-- in the recovery catalog.

procedure changeBackupPiece(
       bp_recid  IN number
      ,bp_stamp  IN number
      ,status    IN varchar2
      ,set_stamp IN number DEFAULT NULL
      ,set_count IN number DEFAULT NULL
      ,osite_key IN number DEFAULT NULL
      ,nsite_key IN number DEFAULT NULL
      );
-- changeBackupPiece is called to change the availability of or delete a
-- backup piece.

procedure changeProxyCopy(
       pc_recid     IN number
      ,pc_stamp     IN number
      ,status       IN varchar2
      ,keep_options IN number DEFAULT NULL  -- null means don't update
      ,keep_until   IN date   DEFAULT NULL
      ,osite_key    IN number DEFAULT NULL
      ,nsite_key    IN number DEFAULT NULL
      );
-- changeProxyCopy is called to change the availability of or delete a
-- proxy copy.

/*----------------------------*
 * Stored Script Procedures   *
 *----------------------------*/

procedure createScript(name IN varchar2);
procedure createScript(name IN varchar2, scr_com IN varchar2,
                       global IN boolean);
procedure replaceScript(name IN varchar2);
procedure replaceScript(name IN varchar2, scr_com IN varchar2,
                        global IN boolean);
-- These procedures create or replace a script with the given name. Each
-- database registered in the catalog has its own namespace for scripts.
-- The putLine procedure must be called to pass the lines of the script
-- to the catalog.

procedure putLine(line IN varchar2);
-- This procedure passes one line of the script to the catalog. The client
-- must call this until all lines have been passed in. After the last line,
-- call putLine(NULL) to indicate that all lines have been passed in.
-- The maximum line length is 1024 bytes.

procedure deleteScript(name IN varchar2);
procedure deleteScript(name IN varchar2, glob IN number);
-- Deletes a script. If glob is TRUE, it deletes a global script.

procedure getScript(name IN varchar2);
procedure getScript(name IN varchar2, glob IN number);
-- This procedure gets a script from the recovery catalog. The client must
-- call getLine to fetch the lines of the script. If glob is TRUE, it gets a
-- global script.

function getLine return varchar2;
-- This function returns one line of the stored script. When all lines have
-- been passed back, it returns NULL.

procedure commitChanges;
-- Just does a commit.

/*---------------------------------------*
 * Procedures for clone database support *
 *---------------------------------------*/

procedure setCloneName(file#            IN number
                      ,creation_change# IN number
                      ,new_clone_fname  IN varchar2
                      ,old_clone_fname  IN varchar2
                      ,changedauxname   OUT boolean
                      ,plugin_change#   IN number DEFAULT 0);

FUNCTION getCloneName(file#            IN number
                     ,creation_change# IN number
                     ,plugin_change#   IN number DEFAULT 0)
    RETURN VARCHAR2;

/*-----------------------------------*
 * Procedures for RMAN configuration *
 *-----------------------------------*/

PROCEDURE setConfig(conf# IN NUMBER
                   ,name  IN VARCHAR2
                   ,value IN VARCHAR2);
PROCEDURE resetConfig;
PROCEDURE setConfig2(conf#    IN NUMBER
                    ,name     IN VARCHAR2
                    ,value    IN VARCHAR2
                    ,nodespec IN BOOLEAN);
PROCEDURE resetConfig2(nodespec IN BOOLEAN,
                       high_conf_recid IN NUMBER DEFAULT NULL);
PROCEDURE deleteConfig(conf# IN NUMBER);
FUNCTION setConfig3(name           IN VARCHAR2
                   ,value          IN VARCHAR2
                   ,db_unique_name IN VARCHAR2) RETURN NUMBER;
PROCEDURE deleteConfig3(conf#          IN NUMBER
                       ,db_unique_name IN VARCHAR2);

/*----------------------------*
 * Version info               *
 *----------------------------*/

function getPackageVersion return varchar2;
function getCatalogVersion return varchar2;

/*-------------------*
 * Utility functions *
 *-------------------*/

/*
 * NAME
 *   bsStatusRecalc
 * DESCRIPTION
 *   Recompute the backupset status for all backupsets whose current
 *   status is a specified value. This is intended to be used when
 *   new values are introduced for the bs.status column.
 */
PROCEDURE bsStatusRecalc(status IN varchar2);

-- reNormalize is the interface to the recovery catalog used to apply a
-- one-time fixup on Windows-NT by re-applying the normalization algorithm,
-- which has been improved for 8.1.6.
-- The first call to reNormalize passes a null value for newname, to
-- initialize the procedure. Each call returns an oldname which needs to be
-- normalized. The caller normalizes the name and passes it back to
-- reNormalize, in newname, on the next call. reNormalize will then update
-- the previous row and return the next one. oldname is returned to the caller
-- as null when there are no more names.
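-- A sketch of the reNormalize call protocol described above, from the
-- caller's side. normalizeName() stands in for the client-side normalization
-- algorithm and is not part of this package:
--
--   declare
--     oldname varchar2(1024);
--     newname varchar2(1024) := NULL;    -- NULL on the first call
--   begin
--     loop
--       dbms_rcvcat.reNormalize(newname, oldname);
--       exit when oldname is null;       -- no more names to fix
--       newname := normalizeName(oldname);
--     end loop;
--   end;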
PROCEDURE reNormalize(newname IN varchar2, oldname OUT varchar2);

-- sanityCheck is a procedure that performs internal consistency checks, and
-- cleanup, of the recovery catalog. It is the last thing that is done during
-- resync.
PROCEDURE sanityCheck;

-- This function gets the sfile_key of the file in the catalog.
-- If the catalog does not have the file, the routine fails when createfile
-- is 0; when createfile is 1, a new record is created.
-- It returns the key of the record created.
-- If the file already exists in the catalog, the current key is
-- returned and the newfile flag is set, so that it can be updated.
-- A special case is when this_fname is null; in this case the routine
-- returns the current db_key, which is needed for a posterior select
-- of all the stored files in the catalog for the current database (db_key).
-- In this case createfile and newfile are ignored.

FUNCTION getDbid RETURN NUMBER;

PROCEDURE listScriptNames(glob IN number, allnames IN number);
-- Opens the cursor to fetch the names of the scripts that are stored in the
-- recovery catalog.
-- By default, scripts created by the target database are fetched, as well as
-- all the global scripts.
-- If GLOB is TRUE, then only the global scripts are fetched.
-- If ALLNAMES is TRUE, then all scripts from all databases are fetched.

PROCEDURE getScriptNames(dbname OUT varchar2, scnm OUT varchar2,
                         sccom OUT varchar2);
-- Fetch one row from the cursor opened by listScriptNames (see the usage
-- sketch below).
-- It returns the name of the target database to which the script belongs
-- (ORAGLOBAL is used to indicate the script is a GLOBAL script), the name of
-- the script, and the comment associated with the script (NULL if no comment
-- is stored in the recovery catalog).

PROCEDURE updateOldestFlashbackSCN(
    oldest_flashback_scn  IN NUMBER,
    oldest_flashback_time IN DATE DEFAULT NULL);
-- Updates the current dbinc record with the oldest_flashback_scn (aka
-- guaranteed flashback scn) and oldest_flashback_time that is passed in.

FUNCTION getDbinc RETURN NUMBER;
-- Returns the package constant this_dbinc_key.

FUNCTION isDuplicateRecord(recid IN NUMBER
                          ,stamp IN NUMBER
                          ,type  IN VARCHAR2) RETURN BOOLEAN;
-- Return TRUE if there exists a record with (recid, stamp) in the catalog
-- table identified by type. Otherwise, FALSE.
-- Type can be one of the following values:
--   1. 'AL' (archived log)
--   2. 'BP' (backup piece)
--   3. 'DC' (datafile copy)

FUNCTION doDuplicateMining RETURN BOOLEAN;
-- Return TRUE if we have to do mining in order to determine the closest
-- resync time. Otherwise, FALSE.

-- Unregister a standby site. This procedure removes the metadata stored in
-- the recovery catalog for the given standby node.
PROCEDURE unregisterSite(db_unique_name IN VARCHAR2,
                         incbcks        IN binary_integer);

-- Rename a site. This procedure renames the metadata stored in the
-- recovery catalog for the given standby node.
PROCEDURE renameSite(from_db_unique_name IN VARCHAR2,
                     to_db_unique_name   IN VARCHAR2);

-- Add db_unique_name to node.
-- Add a new entry to the node table if it doesn't exist.
PROCEDURE resyncAddDBUname(cdbunstr IN varchar2);
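-- A sketch of the listScriptNames / getScriptNames fetch loop described
-- above. How end-of-fetch is signalled is not spelled out here, so this
-- sketch assumes the usual PL/SQL convention of a NO_DATA_FOUND exception;
-- treat that as an assumption, not a documented contract:
--
--   declare
--     dbname varchar2(30);
--     scnm   varchar2(100);
--     sccom  varchar2(255);
--   begin
--     dbms_rcvcat.listScriptNames(glob => 0, allnames => 1);
--     begin
--       loop
--         dbms_rcvcat.getScriptNames(dbname, scnm, sccom);
--         dbms_output.put_line(dbname || ': ' || scnm);
--       end loop;
--     exception
--       when no_data_found then null;   -- assumed end-of-fetch signal
--     end;
--   end;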
-- Function used to return the site key for a required site from the catalog
-- schema. If no db_unique_name is specified, it returns the current site key.
FUNCTION getThisSiteKey(db_unique_name in VARCHAR2 DEFAULT NULL)
    return NUMBER;

PROCEDURE enableResyncActions;
PROCEDURE setReason(reason IN number, forceSet IN boolean default FALSE);
FUNCTION getReason RETURN number;
PROCEDURE incResyncActions(action IN number, objno IN number,
                           objname IN varchar2);
PROCEDURE getResyncActions(valid     OUT boolean
                          ,added     OUT number
                          ,dropped   OUT number
                          ,changed   OUT number
                          ,recreated OUT number
                          ,renamed   OUT number
                          ,resized   OUT number);
PROCEDURE clearResyncActions;
PROCEDURE dumpResyncActions;
FUNCTION debOK (level IN number DEFAULT RCVCAT_LEVEL_DEFAULT) RETURN boolean;

--
-- createTempResource
--
-- Add an entry with the given name and data_type in the tempres table.
--
PROCEDURE createTempResource(
       name      IN varchar2
      ,data_type IN varchar2);

--
-- lockTempResource
--
-- Get a rowlock in the tempres table for the given name and data_type. If
-- no rowlock can be obtained or there is no such object, then return FALSE.
-- Otherwise, return TRUE.
--
FUNCTION lockTempResource(
       name      IN varchar2
      ,data_type IN varchar2) RETURN BOOLEAN;

--
-- cleanupTempResource
--
-- For each row in the tempres table, get a rowlock and then drop the object.
-- If the rowlock couldn't be obtained, then try the next row. No error is
-- signalled.
--
PROCEDURE cleanupTempResource;

--
-- addDbidToImport
--
-- If no dbid and no dbname is provided, then add all the dbids that exist
-- in the source recovery catalog database, and their incarnations, to the
-- idb and idbinc tables respectively.
-- If dbid is not null, then add the dbid and its incarnations to the idb
-- and idbinc tables respectively.
-- If dbname is not null, then add the dbid and its incarnations that
-- correspond to the dbname to the idb and idbinc tables.
-- Signal an error if the dbid was not found, the dbname was ambiguous, or
-- no database was found.
--
PROCEDURE addDbidToImport(
       first  IN binary_integer
      ,idb    IN varchar2
      ,idbinc IN varchar2
      ,dbid   IN number   DEFAULT NULL
      ,dbname IN varchar2 DEFAULT NULL);

--
-- lockDbidToImport
--
-- Obtain a rowlock for all the dbids that are present in the idb table on
-- the source recovery catalog database. This is needed to prevent any update
-- to the source recovery catalog database while doing the import.
--
PROCEDURE lockDbidToImport(
       idb IN varchar2);

--
-- importSchema
--
-- Using the dblink and the idb and idbinc tables, import all the recovery
-- catalog tables from the source recovery catalog database (over the dblink
-- provided) into the destination recovery catalog database. If there is any
-- error, the changes are rolled back.
--
PROCEDURE importSchema(
       dblink IN varchar2
      ,idb    IN varchar2
      ,idbinc IN varchar2);

--
-- Functions to control file sharing attributes and behavior of the RCVCAT
-- package.
--

-- Set archive log file sharing scope attributes for the session.
PROCEDURE setArchiveFileScopeAttributes(logs_shared IN NUMBER);

-- Set backup file sharing scope attributes for the session.
PROCEDURE setBackupFileScopeAttributes(
    disk_backups_shared IN NUMBER,
    tape_backups_shared IN NUMBER);

--
-- unregisterDatabase
--
-- Unregister all databases in idb that were successfully imported from the
-- source recovery catalog database.
--
PROCEDURE unregisterDatabase(
       idb IN varchar2);

/*--------------------------------------*
 *  Virtual Private Catalog Procedures  *
 *--------------------------------------*/

PROCEDURE grant_catalog(userid IN varchar2, dbname IN varchar2);
PROCEDURE grant_catalog(userid IN varchar2, dbid IN number);
PROCEDURE grant_register(userid IN varchar2);
PROCEDURE revoke_catalog(userid IN varchar2, dbname IN varchar2);
PROCEDURE revoke_catalog(userid IN varchar2, dbid IN number);
PROCEDURE revoke_register(userid IN varchar2);
PROCEDURE revoke_all(userid IN varchar2);
PROCEDURE create_virtual_catalog;
PROCEDURE drop_virtual_catalog;

PROCEDURE dumpPkgState (msg in varchar2 default NULL);

end dbms_rcvcat;
>>>
#
# NAME
#   dbmsrman.sql - Recovery MANager package specification
#
# DESCRIPTION
#   This package contains procedures for querying information that
#   Recovery Manager needs from the recovery catalog or the target
#   database controlfile.
#
# NOTES
#   This package is only intended to be used by the Recovery Manager.
#
#   Note that there are two implementations of this package. One is
#   a normal stored package (prvtrmnu.plb) that gets its information from
#   the recovery catalog. The other one is a fixed package (prvtrmns.plb)
#   that gets its information from the target database controlfile.
#
#   Tags, tablespace names and filenames must be passed exactly as
#   stored in the controlfile and the recovery catalog. RMAN must
#   uppercase tag and tablespace names (unless quoted) and
#   normalize filenames before passing them to dbms_rcvman.
#
#   Do not use boolean types in the package specification since
#   RMAN accesses it using embedded SQL. Use numbers instead,
#   0 (FALSE#) and 1 (TRUE#).
#
# MODIFIED (MM/DD/YY)
# swerthei 06/01/98 - proxy copy name translation
# swerthei 05/27/98 - proxy restore
# dbeusee 05/18/98 - misc_81_fixes_1: added initialize(rman_vsn) and
#                    translateDatafile(fno, ckpscn)
# dbeusee 04/21/98 - rpt_redundancy_enh
# dbeusee 03/01/98 - list enh.
# gpongrac 12/22/97 - change findcontrolfilebackup # gpongrac 09/03/97 - have dfcopy name xlat return filesize # tpystyne 09/11/97 - bug 480172, fix name translation # swerthei 08/29/97 - add getdatafile.read_only # swerthei 08/18/97 - add getdatafile.stop_change# # dalpern 04/16/97 - make fixed packages a distinct "world" # gpongrac 04/11/97 - add cfscn to computeRecoveryActions # gpongrac 04/10/97 - add rlgtime to getRecoveryAction # gpongrac 04/04/97 - add translateBackupPieceTag # gpongrac 04/01/97 - add offline range info to computeRecoveryActions # swerthei 03/27/97 - add getDataFile.ts_name # swerthei 03/12/97 - get file sizes in blocks, for metrics # gpongrac 03/15/97 - add to_time for implicit offline ranges in comput # gpongrac 03/11/97 - remove finddatafilebackup and friends # gpongrac 03/10/97 - add function to set debug flag # gpongrac 03/03/97 - add success return code for computeRecoveryAction # gpongrac 02/28/97 - move comment to body # gpongrac 02/27/97 - add cf_current as arg to computeRecoveryActions # gpongrac 02/26/97 - fix type # gpongrac 02/26/97 - add new recovery methods # swerthei 01/21/97 - change parameters for translateBackupSetKey # swerthei 01/15/97 - add dumpState # gpongrac 01/14/97 - add getCloneName # tpystyne 12/17/96 - add listRollbackSegTableSpace # swerthei 01/08/97 - change parameters for backup piece translation # gpongrac 01/06/97 - add nxtscn to getArchivedLog # swerthei 01/03/97 - add getParentIncarnation # swerthei 12/30/96 - use named constants for backup/copy types # swerthei 12/27/96 - add reportTranslateDFDel, reportGetDFDel # swerthei 12/20/96 - add getDataFile.kbytes # gpongrac 11/14/96 - add setDbincKey # tpystyne 11/13/96 - add reset_scn and time to findDataFileBackup # swerthei 11/08/96 - change parms to translateArchivedLogKey # swerthei 11/08/96 - add findPrevDataFileBackup.ckptime # tpystyne 10/29/96 - change create_scn to an in parameter # swerthei 11/05/96 - add findDataFileBackup.ckp_time # swerthei 11/04/96 - add getdatafile.creation_time # swerthei 11/01/96 - honor setDeviceType in list functions # swerthei 10/31/96 - add setDeviceTypeAny # swerthei 10/31/96 - add getDataFile.unrecoverable_change# # swerthei 10/28/96 - add functions for LIST INCARNATION OF DATABASE # swerthei 10/23/96 - add new functions for LIST command # tpystyne 10/18/96 - online redo log support # gpongrac 10/07/96 - have getDatafile return blksize too # gpongrac 10/02/96 - add getCheckpoint # gpongrac 09/24/96 - change getDataFile to return creation SCN and fna # swerthei 09/12/96 - add getDataFileLocation # tpystyne 08/26/96 - add findOfflineRangeBackup # gpongrac 07/31/96 - add translateArchivedLogCancel # tpystyne 07/10/96 - change translateBackupSet prototypes # tpystyne 06/24/96 - add getIncrementalScn # tpystyne 06/13/96 - add findPrevDataFileBackup # swerthei 06/11/96 - add validation parameters to findDataFileBackup # tpystyne 05/28/96 - separate translateArchiveLogRangeName and Pattern # tpystyne 05/22/96 - add fname parameter to findDataFileBackup # gpongrac 05/13/96 - fix merge errors # tpystyne 05/13/96 - complete beta1 functionality # asurpur 04/09/96 - Dictionary Protection Implementation # tpystyne 05/07/96 - implement restore procedures # tpystyne 04/24/96 - enable restore procedures # gpongrac 03/22/96 - add restore procedures # tpystyne 02/20/96 - more stuff # gpongrac 01/25/96 - remove domain arg from setDatabase # tpystyne 12/06/95 - rename procedures # tpystyne 12/05/95 - Created # define dbmsrman_sql <<< create or replace package 
dbms_rcvman authid current_user is
-- DE-HEAD <- tell SED where to cut

----------------------------------------
-- PUBLIC VARIABLES AND TYPES SECTION --
----------------------------------------

actual_dbinc_key number := NULL;  -- see comments on getActualDbinc

TRUE#  CONSTANT number := 1;
FALSE# CONSTANT number := 0;

-- The public variables below are used in KQFV.H to obtain summary
-- information based on job filter attributes.
SESSION_KEY       number;
SESSION_FROMTIME  DATE;
SESSION_UNTILTIME DATE;

-- The values here must never be changed, because the 8.0 rman executables
-- have these values hard-coded in the krmkbt enum in krmk.h. The setFrom
-- procedure in particular uses hard-coded values.
-- The 8.1.5 rman executable calls a procedure, set_package_constants, that
-- re-assigns these constants to whatever the package needs them to be, then
-- queries the package for their new values. The 8.1.5 rman does not care
-- what the values are; however, the cursor used by reportGetDFDel used to use
-- these values to perform an order-by to return rows in preference order.
-- The preference order is used to decide which ones to delete.
-- As of 8.1.6, the order-by in reportGetDFDel is independent of these values.
-- The 8.1.6 rman does not use these values at all, except in setFrom.
-- However, for backwards compatibility with the 8.1.5 RMAN, these must remain
-- as public package constants.
COPY                  number := 1;  -- any image copy of a file
FULL_DF_BACKUP        number := 2;  -- datafile in a full backup set
INCREMENTAL_DF_BACKUP number := 3;  -- datafile in an incr backup set
BACKUP                number := 4;  -- any file in a backup set (incl proxy)
OFFLINE_RANGE         number := 5;  -- an offline range
CUMULATIVE            number := 6;  -- cumulative incremental - for LIST only
PROXY                 number := 7;  -- any proxy copy of a file
NONPROXY              number := 9;  -- any image, backup set other than proxy

-- Recovery Action Kinds (Obsolete as of 8.1.6)
--
implicitOfflRange CONSTANT NUMBER := 2**0;
cleanRange        CONSTANT NUMBER := 2**1;
applyOfflRange    CONSTANT NUMBER := 2**2;
dfCopy            CONSTANT NUMBER := 2**3;
proxyFull         CONSTANT NUMBER := 2**4;
buSet             CONSTANT NUMBER := 2**5;
applyIncremental  CONSTANT NUMBER := 2**6;
redo              CONSTANT NUMBER := 2**7;

-- kind masks
maxKind  CONSTANT NUMBER := redo;             -- last real kind above
allKind  CONSTANT NUMBER := (maxKind*2) - 1;  -- all real backup types
fullKind CONSTANT NUMBER := dfCopy + proxyFull + buSet;
tagKind  CONSTANT NUMBER := fullKind + applyIncremental;

-- pseudo kinds
deletedKind CONSTANT NUMBER := maxKind*2;     -- action deleted

----------------------------------
-- Backupset Availability Masks --
----------------------------------

BSavailable   CONSTANT BINARY_INTEGER := 2**0;
BSunavailable CONSTANT BINARY_INTEGER := 2**1;
BSdeleted     CONSTANT BINARY_INTEGER := 2**2;
BSexpired     CONSTANT BINARY_INTEGER := 2**3;

-- BSpartial_avail is a backupset validation mask and NOT a backuppiece
-- filter. For example, to get 'A', 'U', 'X' pieces and to enable validation
-- to succeed for a partially available backupset, use
-- BSpartial_avail + BSavailable + BSunavailable + BSexpired.
BSpartial_avail CONSTANT BINARY_INTEGER := 2**4;

---------------------
-- BackupType Mask --
---------------------

BSdatafile_full CONSTANT BINARY_INTEGER := 2**0;
BSdatafile_incr CONSTANT BINARY_INTEGER := 2**1;
BSarchivelog    CONSTANT BINARY_INTEGER := 2**2;

--------------------------
-- ControlfileType Mask --
--------------------------

BScfile_all  CONSTANT BINARY_INTEGER := 2**0;  -- shouldn't be altered
BScfile_auto CONSTANT BINARY_INTEGER := 2**1;

---------------------
-- Datafile Record --
---------------------

TYPE dfRec_t IS RECORD
(
  dfNumber         number,
  dfCreationSCN    number,
  dfCreationTime   date,
  fileName         varchar2(1024),
  tsName           varchar2(30),
  tsNumber         number,
  status           number,
  blocks           number,
  blockSize        number,
  kbytes           number,
  unrecovSCN       number,
  stopSCN          number,
  readOnly         number,
  rfNumber         number,
  inBackup         number,          -- if greater than 0 then
                                    -- included_in_database_backup is set
  auxName          varchar2(1024),
  dbincKey         number,
  dfOfflineSCN     number,
  dfOnlineSCN      number,
  dfOnlineTime     date,
  encrypt          number,          -- encrypt value 1=ON, 2=OFF, 3=CLEAR
  foreignDbid      number,          -- foreign database id
  pluggedRonly     binary_integer,  -- 1 for read-only. Otherwise, 0
  pluginSCN        number,          -- plugin change#
  pluginRlgSCN     number,          -- plugin resetlogs_change#
  pluginRlgTime    date,            -- plugin resetlogs_time
  newDfCreationSCN number,          -- plugin scn or creation scn
  creation_thread  number,          -- creation thread
  creation_size    number           -- creation size
);

---------------------
-- Tempfile Record --
---------------------

TYPE tfRec_t IS RECORD
(
  tfNumber       number,
  tfCreationSCN  number,
  tfCreationTime date,
  fileName       varchar2(1024),
  tsName         varchar2(30),
  tsNumber       number,
  status         number,
  isSFT          varchar2(3),
  blocks         number,
  blockSize      number,
  maxSize        number,
  nextSize       number,
  rfNumber       number,
  dbincKey       number
);

-------------------------
-- Archived Log Record --
-------------------------

TYPE alRec_t IS RECORD
(
  key                number,
  recid              number,
  stamp              number,
  thread             number,
  sequence           number,
  fileName           varchar2(1024),
  lowSCN             number,
  lowTime            date,
  nextSCN            number,
  nextTime           date,
  rlgSCN             number,
  rlgTime            date,
  blocks             number,
  blockSize          number,
  status             varchar2(1),
  compTime           date,
  duplicate          number,
  isrdf              varchar2(3),
  compressed         varchar2(3),
  stby               varchar2(1),
  terminal           varchar2(3),
  site_key           number,
  site_key_order_col number,
  source_dbid        number
);

-- All of the queries which return data about a backup/imagecopy/proxycopy
-- select into a rcvRec_t record type. We have standardized all of our
-- queries to have a common select-list and the results of the queries are
-- returned through a common public package function. The reason for this is
-- so that krmk.pc can populate its internal data structures consistently,
-- regardless of what particular procedure it has called to query the catalog.
-- By having all queries select into the same record type, we can ensure
-- that all queries use the same select list. Any new fields that get added
-- to this record will require updating the select lists of all queries.
-- Failure to make the correct updates will result in PL/SQL giving an error
-- when the package body is re-created, so the error will be easily detected
-- without the need to run any test suite.
-- The record is divided into three sections. These correspond to
-- three krmk.h data structures which will be populated with the data
-- from this record. Refer to krmk.h for a description of the purpose
-- of each of these three data structures.
-- Think of this as: the container acts on the object.
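-- A minimal sketch of how the bit-mask constants earlier in this section are
-- meant to be combined and tested. The variable name is illustrative;
-- BITAND is the standard Oracle bit test:
--
--   declare
--     availMask binary_integer := dbms_rcvman.BSavailable
--                               + dbms_rcvman.BSunavailable
--                               + dbms_rcvman.BSexpired
--                               + dbms_rcvman.BSpartial_avail;
--   begin
--     if bitand(availMask, dbms_rcvman.BSavailable) != 0 then
--       null;  -- 'A' (available) pieces pass this filter
--     end if;
--   end;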
---------------------
-- Recovery Record --
---------------------

TYPE rcvRec_t IS RECORD
(
  -- *** Recovery Container Section ***
  type_con           number(3),      -- recovery container type
  key_con            number(15),     -- primary key
  recid_con          number(10),     -- recid
  stamp_con          number(10),     -- stamp
  setStamp_con       number(10),     -- set stamp if backup set (null)
  setCount_con       number(10),     -- set count if backup set (null)
  bsRecid_con        number(10),     -- backup set recid (null)
  bsStamp_con        number(10),     -- backup set stamp (null)
  bsKey_con          number(15),     -- backup set key (null)
  bsLevel_con        number(1),      -- backup set level (null)
  bsType_con         varchar2(1),    -- backup set type
  elapseSecs_con     number(10),     -- backup set elapse seconds (null)
  pieceCount_con     number(5),      -- backup set piece count (null)
  fileName_con       varchar2(1024), -- filename if a copy (or) piece (null)
  tag_con            varchar2(32),   -- tag (null)
                                     -- filled in by addAction() for
                                     -- backup sets
  copyNumber_con     number(3),      -- backup set copy# (null) maxlimit 256
                                     -- filled in by addAction() only
  status_con         varchar2(1),    -- status (null)
  blocks_con         number(10),     -- size of file in blocks (null)
  blockSize_con      number(5),      -- block size (null)
  deviceType_con     varchar2(255),  -- device type required (null)
                                     -- filled in by addAction() for
                                     -- backup sets
  compTime_con       date,           -- completion time
  cfCreationTime_con date,           -- controlfile creation time if
                                     -- offline range (null)
  pieceNumber_con    number,
  bpCompTime_con     date,
  bpCompressed_con   varchar2(3),
  multi_section_con  varchar2(1),    -- multi-section backup piece

  -- *** Recovery Action Section ***
  type_act           number(3),      -- recovery action type
  fromSCN_act        number(15),
  toSCN_act          number(15),
  toTime_act         date,
  rlgSCN_act         number(15),
  rlgTime_act        date,
  dbincKey_act       number(15),
  level_act          number(1),
  section_size_act   number,

  -- *** Recovery Object Section ***
  dfNumber_obj       number(6),
  dfCreationSCN_obj  number(15),
  cfSequence_obj     number(15),     -- controlfile autobackup sequence
  cfDate_obj         date,           -- controlfile autobackup date
  logSequence_obj    number(10),
  logThread_obj      number(4),
  logRlgSCN_obj      number(15),
  logRlgTime_obj     date,
  logLowSCN_obj      number(15),
  logLowTime_obj     date,
  logNextSCN_obj     number(15),
  logNextTime_obj    date,
  logTerminal_obj    varchar2(3),
  cfType_obj         varchar2(1),    -- controlfile type ('B' or 'S')

  -- *** Retention Policy Section ***
  keep_options       number(4),
  keep_until         date,

  -- *** Optimization Action Section ***
  afzSCN_act         number(15),
  rfzTime_act        date,
  rfzSCN_act         number(15),

  -- *** media Action Section ***
  media_con          varchar2(80),   -- media volume name for backup piece
  isrdf_con          varchar2(3),

  -- *** site specific information for recovery action ***
  site_key_con       number,

  -- *** plugged Section ***
  foreignDbid_obj    number,         -- foreign database id
  pluggedRonly_obj   binary_integer, -- 1 for read-only. Otherwise, 0
  pluginSCN_obj      number,         -- plugin change#
  pluginRlgSCN_obj   number,         -- plugin resetlogs change#
  pluginRlgTime_obj  date,           -- plugin resetlogs time

  -- *** sort order Section ***
  newDfCreationSCN_obj number,       -- plugin scn or creation scn
  newToSCN_act         number,       -- plugin scn or checkpoint scn
  newRlgSCN_act        number,       -- plugin rlgscn or rlgscn
  newRlgTime_act       date,         -- plugin rlgtime or rlgtime

  -- *** SPFILE specific data ***
  sfDbUniqueName_obj VARCHAR2(30)
);

------------------------------
-- Recovery Container Types --
------------------------------
--
-- NOTE!!! NOTE!!! NOTE!!!
--
-- You must never change these constant values between releases. Doing so
-- would break compatibility by making lower versions of the RMAN executable
-- unable to talk to this recovery catalog. We have never changed these
-- constants from 8.1.5 onwards. See bug 893864 for details.
--
-- NOTE: Order is important; it is used in an ORDER BY.

offlineRangeRec_con_t CONSTANT NUMBER := 2**0;
proxyCopy_con_t       CONSTANT NUMBER := 2**1;
imageCopy_con_t       CONSTANT NUMBER := 2**2;
backupSet_con_t       CONSTANT NUMBER := 2**3;
addredo_con_t         CONSTANT NUMBER := 2**4;
deleted_con_t         CONSTANT NUMBER := 2**8;
datafile_con_t        CONSTANT NUMBER := 2**9;

-- Masks
backupMask_con_t CONSTANT NUMBER := proxyCopy_con_t + imageCopy_con_t +
                                    backupSet_con_t;
tagMask_con_t    CONSTANT NUMBER := proxyCopy_con_t + imageCopy_con_t +
                                    backupSet_con_t;

---------------------------
-- Recovery Action Types --
---------------------------

full_act_t           CONSTANT NUMBER := 2**0;
incremental_act_t    CONSTANT NUMBER := 2**1;
redo_act_t           CONSTANT NUMBER := 2**2;
offlineRange_act_t   CONSTANT NUMBER := 2**3;
cleanRange_act_t     CONSTANT NUMBER := 2**4;
implicitRange_act_t  CONSTANT NUMBER := 2**5;
spanningRange_act_t  CONSTANT NUMBER := 2**6;
createdatafile_act_t CONSTANT NUMBER := 2**7;

-----------------------------------------
-- Recovery Record Returning Functions --
-----------------------------------------

-- These defines are used as the funCode arg to getRcvRec to tell it which
-- function it should call. We do this so that krmk.pc can have a single
-- interface routine for getting a rcvRec_t.

getCfCopy        CONSTANT NUMBER := 0;
getDfCopy        CONSTANT NUMBER := 1;
getAnyProxy      CONSTANT NUMBER := 2;
getCfBackup      CONSTANT NUMBER := 3;
listCfCopy       CONSTANT NUMBER := 4;
listDfCopy       CONSTANT NUMBER := 5;
listCfBackup     CONSTANT NUMBER := 6;
listDfBackup     CONSTANT NUMBER := 7;
listAlBackup     CONSTANT NUMBER := 8;
listDfProxy      CONSTANT NUMBER := 9;
getRecovAction   CONSTANT NUMBER := 10;
getAlBackup      CONSTANT NUMBER := 11;
listAlCopy       CONSTANT NUMBER := 12;
listBSet         CONSTANT NUMBER := 13;
getSfBackup      CONSTANT NUMBER := 14;
listSfBackup     CONSTANT NUMBER := 15;
getAllBSet       CONSTANT NUMBER := 16;
listAlProxy      CONSTANT NUMBER := 17;
getRangeAlBackup CONSTANT NUMBER := 18;

------------------------
-- RMAN command types --
------------------------

-- These defines are used as an interface to find out the command executed
-- by rman.
--
unknownCmd_t    CONSTANT BINARY_INTEGER := 0;
recoverCmd_t    CONSTANT BINARY_INTEGER := 1;
rcvCopyCmd_t    CONSTANT BINARY_INTEGER := 2;
obsoleteCmd_t   CONSTANT BINARY_INTEGER := 3;
restoreCmd_t    CONSTANT BINARY_INTEGER := 4;
blkRestoreCmd_t CONSTANT BINARY_INTEGER := 5;

----------------------------------------
-- What to do when archiver is stuck? --
----------------------------------------

-- Set this to 0 if you want to disable the behavior of using memory
-- sorting when archiver is stuck.
--
stuckMemorySize CONSTANT NUMBER := 50 * 1024 * 1024;

-----------------------
-- Backup Set Record --
-----------------------

TYPE bsRec_t IS RECORD
(
  recid         number,
  stamp         number,
  key           number,
  setStamp      number,
  setCount      number,
  bsType        varchar2(1),
  level         number,
  elapseSecs    number,
  compTime      date,
  status        varchar2(1),
  pieceCount    number,
  keep_options  number(4),
  keep_until    date,
  multi_section varchar2(1)
);

-------------------------
-- Backup Piece Record --
-------------------------

TYPE bpRec_t IS RECORD
(
  recid       number,
  stamp       number,
  key         number,
  bskey       number,
  setStamp    number,
  setCount    number,
  pieceNumber number,
  copyNumber  number,
  status      varchar2(1),
  compTime    date,
  handle      varchar2(1024),
  tag         varchar2(32),
  deviceType  varchar2(255),
  media       varchar2(80),
  bytes       number,
  compressed  varchar2(3),
  site_key    number
);

---------------------------------
-- Backupset Validation Record --
---------------------------------

TYPE validBackupSetRec_t IS RECORD
(
  deviceType varchar2(255),
  tag        varchar2(32),   -- may be null
  copyNumber number,         -- null if code 2 or 3
  code       number          -- 1 => same copy#
                             -- 2 => mix of copy#s, but same tag
                             -- 3 => mix of copy#s and tags
);

bsRecCacheEnabled   constant boolean := TRUE;  -- FALSE to use pre10i method
bsRecCacheLowLimit  constant number  := 2048;  -- minimum cache size
bsRecCacheHighLimit constant number  := 32768; -- maximum cache size

TYPE incarnation_t IS RECORD
(
  INCARNATION#            NUMBER,
  RESETLOGS_CHANGE#       NUMBER,
  RESETLOGS_TIME          DATE,
  PRIOR_RESETLOGS_CHANGE# NUMBER,
  PRIOR_RESETLOGS_TIME    DATE,
  STATUS                  VARCHAR2(7),
  RESETLOGS_ID            NUMBER,
  PRIOR_INCARNATION#      NUMBER
);

TYPE incarnation_set IS VARRAY(1) OF incarnation_t;

--------------------
-- backup history --
--------------------

TYPE bhistoryRec_t IS RECORD
(
  dfNumber      number,
  create_scn    number,
  reset_scn     number,
  reset_time    date,
  ckp_scn       number,
  ckp_time      date,
  stop_scn      number,
  logThread     number,
  logSequence   number,
  setStamp      number,
  setCount      number,
  compTime      date,
  nbackups      number,
  logTerminal   varchar2(3),
  next_scn      number,
  pluggedRonly  binary_integer,  -- 1 for read-only. Otherwise, 0
  pluginSCN     number,
  pluginRlgSCN  number,
  pluginRlgTime date,
  newcreate_scn number,          -- create_scn or pluginSCN
  newreset_scn  number,          -- reset_scn or pluginRlgSCN
  newreset_time date             -- reset_time or pluginRlgTime
);

---------------
-- aged file --
---------------

TYPE agedFileRec_t IS RECORD
(
  type  number,
  key   number,
  stamp number
);

--------------------------------------------------------
-- List Backup Constants, Record and Global Variables --
--------------------------------------------------------

-- Constants
-- NOTE: These constants will be displayed in the RC_ view and will be
-- visible to the user.
backupset_txt   CONSTANT VARCHAR2(16) := 'BACKUP SET';
copy_txt        CONSTANT VARCHAR2(16) := 'COPY';
proxycopy_txt   CONSTANT VARCHAR2(16) := 'PROXY COPY';
datafile_txt    CONSTANT VARCHAR2(16) := 'DATAFILE';
spfile_txt      CONSTANT VARCHAR2(16) := 'SPFILE';
archivedlog_txt CONSTANT VARCHAR2(16) := 'ARCHIVED LOG';
controlfile_txt CONSTANT VARCHAR2(16) := 'CONTROLFILE';
piece_txt       CONSTANT VARCHAR2(16) := 'PIECE';
available_txt   CONSTANT VARCHAR2(16) := 'AVAILABLE';
unavailable_txt CONSTANT VARCHAR2(16) := 'UNAVAILABLE';
expired_txt     CONSTANT VARCHAR2(16) := 'EXPIRED';
deleted_txt     CONSTANT VARCHAR2(16) := 'DELETED';
other_txt       CONSTANT VARCHAR2(16) := 'OTHER';
full_txt        CONSTANT VARCHAR2(16) := 'FULL';
incr1_txt       CONSTANT VARCHAR2(16) := 'INCR1';
incr2_txt       CONSTANT VARCHAR2(16) := 'INCR2';
incr3_txt       CONSTANT VARCHAR2(16) := 'INCR3';
incr4_txt       CONSTANT VARCHAR2(16) := 'INCR4';
incr_txt        CONSTANT VARCHAR2(16) := 'INCR';   -- level unknown

-- The following record type is returned by the listBackup() function.
-- NOTE: The names of the variables in this structure are displayed in
-- the view V_$BACKUP_FILES. The rc_lbRec_t structure is used to display
-- RC_BACKUP_FILES. So remember to make changes in rc_lbRec_t,
-- rc_lbRec_t_body and rc_listBackup() if you need to make new
-- columns visible to the user.
--
TYPE lbRec_t IS RECORD
(
  list_order1     NUMBER,         -- just a hint to correctly order records
  list_order2     NUMBER,         -- just a hint to correctly order records
  pkey            NUMBER,         -- primary key

  -- row part
  backup_type     VARCHAR2(32),   -- Type of the backup:
                                  --   * 'BACKUP SET'
                                  --   * 'COPY'
                                  --   * 'PROXY COPY'
  -- file part
  file_type       VARCHAR2(32),   -- Type of the file:
                                  --   * 'DATAFILE'
                                  --   * 'CONTROLFILE'
                                  --   * 'SPFILE'
                                  --   * 'REDO LOG'
                                  --   * 'PIECE'

  -- Common part.
  -- This part is shared by rows returned from listBackup.
  keep            VARCHAR2(3),
  keep_until      DATE,
  keep_options    VARCHAR2(13),
  status          VARCHAR2(16),   -- Status of the piece/copy:
                                  --   * 'AVAILABLE'
                                  --   * 'UNAVAILABLE'
                                  --   * 'EXPIRED'
                                  --   * 'OTHER'
  fname           VARCHAR2(1024), -- piece or copy name
  tag             VARCHAR2(32),   -- piece or copy tag
  media           VARCHAR2(80),
  recid           NUMBER,
  stamp           NUMBER,
  device_type     VARCHAR2(255),
  block_size      NUMBER,
  completion_time DATE,
  is_rdf          VARCHAR2(3),
  compressed      VARCHAR2(3),
  obsolete        VARCHAR2(3),
  keep_for_dbpitr VARCHAR2(3),
  bytes           NUMBER,

  -- BACKUP SET part.
  -- Valid only when backup_type is 'BACKUP SET'.
  bs_key             NUMBER,
  bs_count           NUMBER,
  bs_stamp           NUMBER,
  bs_type            VARCHAR2(32),   -- Type of the backup set:
                                     --   * 'DATAFILE'
                                     --   * 'ARCHIVED LOG'
  bs_incr_type       VARCHAR2(32),
  bs_pieces          NUMBER,
  bs_copies          NUMBER,
  bs_completion_time DATE,
  bs_status          VARCHAR2(16),   -- Status of the backup set:
                                     --   * 'AVAILABLE'
                                     --   * 'UNAVAILABLE'
                                     --   * 'EXPIRED'
                                     --   * 'OTHER'
  bs_bytes           NUMBER,
  bs_compressed      VARCHAR2(3),    -- If the backup set is compressed:
                                     --   * 'YES'
                                     --   * 'NO'
                                     --   * 'OTHER'
  bs_tag             VARCHAR2(1024), -- List of all tags of pieces.
                                     -- We don't repeat the same tags. Tags
                                     -- are divided by commas.
  bs_device_type     VARCHAR2(255),  -- List of device types of pieces.
                                     -- Device types are divided by commas.

  -- BACKUP PIECE part.
  -- Valid only when file_type is 'PIECE' and backup_type is 'BACKUP SET'.
  bp_piece#          NUMBER,
  bp_copy#           NUMBER,

  -- DATAFILE part.
  -- Valid only when file_type is 'DATAFILE', 'CONTROLFILE', or 'SPFILE'.
  df_file#               NUMBER,
  df_tablespace          VARCHAR2(30),
  df_resetlogs_change#   NUMBER,
  df_creation_change#    NUMBER,
  df_checkpoint_change#  NUMBER,
  df_ckp_mod_time        DATE,
  df_incremental_change# NUMBER,

  -- REDO LOG part.
  -- This part is valid only when file_type is 'REDO LOG'.
  rl_thread#           NUMBER,
  rl_sequence#         NUMBER,
  rl_resetlogs_change# NUMBER,
  rl_first_change#     NUMBER,
  rl_first_time        DATE,
  rl_next_change#      NUMBER,
  rl_next_time         DATE,

  -- SPFILE part
  sf_db_unique_name    VARCHAR2(30)
);

-- This record keeps the datafile information for the listBackup function.
-- In addition to the normal datafile record, it contains various keepscn
-- information.
TYPE lbDfRec_t IS RECORD
(
  dfRec dfRec_t,

  -- This is the minimum checkpoint_change# of the backups that are kept
  -- for the retention policy, and its corresponding resetlogs_change#.
  -- A full backup of this datafile is kept if its checkpoint_change# is
  -- greater than fullmin_scn and its resetlogs_change# is greater
  -- than fullmin_scn or equal to fullmin_rlgscn.
  fullmin_scn    NUMBER,
  fullmin_rlgscn NUMBER,

  -- This is the minimum checkpoint_change# of the backups that are kept
  -- for the retention policy, and its corresponding resetlogs_change#.
  -- An incremental backup of this datafile is kept if its checkpoint_change#
  -- is greater than incrmin_scn and its resetlogs_change# is greater
  -- than incrmin_scn or equal to incrmin_rlgscn.
  incrmin_scn    NUMBER,
  incrmin_rlgscn NUMBER,

  -- This is the minimum checkpoint_change# of the backups that are kept
  -- for the archived logs attribute, and its corresponding
  -- resetlogs_change#. All archivelogs and their backups are kept if their
  -- first_change# is greater than logmin_scn and their resetlogs_change# is
  -- greater than logmin_scn or equal to logmin_rlgscn.
  logmin_scn     NUMBER,
  logmin_rlgscn  NUMBER
);

TYPE lbDfRecTab_t  IS TABLE OF lbDfRec_t     INDEX BY BINARY_INTEGER;
TYPE lbRecTab_t    IS TABLE OF lbRec_t       INDEX BY BINARY_INTEGER;
TYPE lbRecVar_t    IS VARRAY(1) OF lbRec_t;
TYPE rcvRecTabI_t  IS TABLE OF rcvRec_t      INDEX BY BINARY_INTEGER;
TYPE rcvRecTabII_t IS TABLE OF rcvRecTabI_t  INDEX BY BINARY_INTEGER;
TYPE dfRecTab_t    IS TABLE OF dfRec_t       INDEX BY BINARY_INTEGER;
TYPE numTab_t      IS TABLE OF number        INDEX BY BINARY_INTEGER;
TYPE lbCursor_t    IS REF CURSOR;

-----------------------------------------------------------------------------
-- The following structure is used by the function listBackup.
-- The variables in the structure are initialized when listBackup is called
-- with firstCall=TRUE.
-----------------------------------------------------------------------------

TYPE lbState_t IS RECORD
(
  -- The collection table lbRecOutTab keeps track of the rows which should be
  -- returned by the function listBackup. The function listBackup will loop
  -- until it has filled lbRecOutTab with at least one element.
  lbRecOutTab       lbRecTab_t,
  lbRecOutTab_count binary_integer,

  -- The collection table lbRecTmpTab keeps track of the backup datafile and
  -- backup archived log rows which are part of the backup set.
  lbRecTmpTab       lbRecTab_t,

  -- The collection lbRecCmn keeps track of the backup set attributes.
  lbRecCmn          lbRec_t,

  -- The collection table lbDfRecTabUs contains the list of all database
  -- files which ever existed after untilSCN.
  lbDfRecTabUs      lbDfRecTab_t,

  -- The collection table lbDfRecTab contains the list of all database files
  -- which ever existed.
  lbDfRecTab        dfRecTab_t,

  -- This variable holds the maximum datafile number. It is used for
  -- indexing lbDfRecTab.
  lbMaxDfNumber     number,

  -- For keep backups we need to know the current time.
  lbNowTime         date,

  -- The table lbPieceCountTab stores the number of pieces in each copy. The
  -- variable lbCopyCount says how many copies we have.
  lbPieceCountTab     numTab_t,
  lbCopyCount         binary_integer,

  -- The Must Keep List is a table of rcvRecTabI_t indexed by binary_integer,
  -- which itself is a table of rcvRec_t.
  lbMkTab             rcvRecTabII_t,

  -- The Must Keep Incremental List is a table of rcvRecTabI_t indexed by
  -- binary_integer, which itself is a table of rcvRec_t.
  lbMkITab            rcvRecTabII_t,

  -- The variable lbMinGrsp stands for minimum guaranteed restore point.
  -- An archived log backup set is obsolete if all rl_first_change# values
  -- in the backup set are less than lbMinGrsp. No resetlogs information
  -- is compared. The redo log copies DO NOT follow this rule. We
  -- keep the redo log copies only if needed by a guaranteed restore point.
  -- The idea of keeping the backup sets of redo logs since the oldest GRP
  -- is to be able to flash back to GRP2 from GRP1 (where GRP2 > GRP1),
  -- because that will require archivelogs outside the range listed by the
  -- grsp table (from_scn - to_scn columns).
  lbMinGrsp           number,

  -- The variable lbFbUntilTime stands for Flashback Until Time.
  -- An archived log backup set is obsolete if all rl_first_time values in
  -- the backup set are less than lbFbUntilTime. No resetlogs information is
  -- compared. The redo log copies (that is, archived logs and proxy
  -- copies) follow the same rule.
  lbFbUntilTime       date,

  -- The variable lbRlKeepRlgSCN is the resetlogs_change# associated with
  -- lbRlKeepSCN. It is used in conjunction with lbRlKeepSCN to
  -- decide an obsolete archived log. When NULL, the resetlogs_change#
  -- is unknown.
  lbRlKeepRlgSCN      number,

  -- The variable lbRlKeepSCN says that an archived log backup set is
  -- obsolete if the rl_first_change# in the backup set is less than
  -- lbRlKeepSCN and its resetlogs_change# is greater than lbRlKeepSCN and
  -- equal to lbRlKeepRlgSCN.
  -- The redo log copies (that is, archived logs and proxy copies)
  -- follow the same rule.
  lbRlKeepSCN         number,

  -- Either lbObsoleteRetention or lbObsoleteKeep is set to TRUE when the
  -- current backup processed by listBackup is obsolete.
  -- If lbObsoleteRetention is TRUE, then the backup is obsolete because of
  -- the retention policy. If lbObsoleteKeep is TRUE, then the backup is
  -- obsolete because of its keep attributes.
  lbObsoleteRetention boolean,
  lbKeepForDBPITR     boolean,
  lbObsoleteKeep      boolean,
  lbNeedObsoleteData  boolean
);

-- In case listBackup is not called from a pipelined function, there is no
-- need for the caller to save and maintain the state: the function will use
-- the state from the package.
lbStatePck lbState_t;

---------------------------------------------------------------------------
-- End of global variables used by the function listBackup.
---------------------------------------------------------------------------
-- Intelligent Repair variables

TYPE failureRec_t IS RECORD
(
  priority     VARCHAR2(8),
  failureId    NUMBER,
  parentId     NUMBER,
  childCount   NUMBER,
  description  VARCHAR2(1024),
  timeDetected DATE,
  status       VARCHAR2(12),
  impacts      VARCHAR2(1024)
);

TYPE repairRec_t IS RECORD
(
  type        NUMBER,
  failureidx  NUMBER,
  repairidx   NUMBER,
  description VARCHAR2(1024)
);

TYPE repairParmsRec_t IS RECORD
(
  type       NUMBER,
  failureidx NUMBER,
  repairidx  NUMBER,
  name       VARCHAR2(256),
  value      VARCHAR2(512)
);

TYPE repairOptionRec_t IS RECORD
(
  optionidx   NUMBER,
  description VARCHAR2(1024)
);

TYPE repairStepRec_t IS RECORD
(
  type          NUMBER,
  failureidx    NUMBER,
  repairidx     NUMBER,
  repairstepidx NUMBER,
  workingrepair NUMBER,
  description   VARCHAR2(1024),
  repairscript  VARCHAR2(1024)
);

-----------------------------------------------------
-- PUBLIC FUNCTION/PROCEDURE SPECIFICATION SECTION --
-----------------------------------------------------

----------------------------------------
-- Debugging functions and procedures --
----------------------------------------

FUNCTION dumpState(
       lineno IN number) RETURN varchar2;

PROCEDURE dumpPkgState(msg in varchar2 default null);

PROCEDURE setDebugOn;
PROCEDURE setDebugOff;

----------------------------
-- Package Initialization --
----------------------------

-- This is a vestigial function that was released to customers in 8.1.3 Beta.
-- It is no longer called, and is no longer needed, but must still be here
-- because this version of the package may be called by an 8.1.3 rman
-- executable.
PROCEDURE initialize(rman_vsn IN number);

-- Used by 8.1.5 to re-assign the order of the backup_type constants to their
-- correct order. This procedure is not called by 8.1.4-, so the constants
-- will remain set to the above values for those executables.
PROCEDURE set_package_constants;

-----------------------
-- Utility functions --
-----------------------

FUNCTION stamp2date(stamp IN number) RETURN date;

------------------------------------
-- Get Current Database Incarnation
------------------------------------

PROCEDURE getCurrentIncarnation(
       db_id      IN number
      ,reset_scn  OUT number
      ,reset_time OUT date);

------------------------------
-- Set Database Incarnation --
------------------------------

-- setDatabase selects which target database subsequent dbms_rcvman
-- procedures operate on. Note that only the current incarnation can be
-- selected. If the target database or its current incarnation is not
-- registered, then setDatabase will fail.
-- setDatabase sets the package state variables to point to the selected
-- database and its current incarnation.
-- The settings will be valid until the end of the session unless setDatabase
-- is called again.
-- When the dbms_rcvman package executes against the target database
-- controlfile, setDatabase just returns without doing anything.
-- Input parameters:
--   db_id
--     the value of kccfhdbi from the controlfile of the target database
--   db_name
--     the name of the database
--   reset_scn
--     the resetlogs SCN of this database
--   reset_time
--     the resetlogs time
-- Exceptions:
--   DATABASE_NOT_FOUND (ORA-20001)
--     No database with the given db_id was found in the recovery catalog.
--     The database must be registered using registerDatabase first.
--   DATABASE_INCARNATION_NOT_FOUND (ORA-20003)
--     No database incarnation matches the given arguments.
--     The database incarnation must be registered using resetDatabase first.

PROCEDURE setDatabase(
       db_name        IN varchar2
      ,reset_scn      IN number
      ,reset_time     IN date
      ,db_id          IN number
      ,db_unique_name IN varchar2 default NULL
      ,site_aware     IN boolean  default FALSE
      ,dummy_instance IN boolean  default FALSE);

-- Return the db_unique_name associated with the db_id if there is one
-- db_unique_name. If there is more than one db_unique_name, then raise a
-- too_many_rows error. If there is no row, then return NULL.
FUNCTION getDbUniqueName(
       db_id IN number) RETURN varchar2;

-- Return TRUE if the database site identified by the current db_unique_name
-- is a standby.
FUNCTION DbUniqueNameIsStandby RETURN NUMBER;

-- setCanConvertCf is used to tell that the client is capable of controlfile
-- conversion.
PROCEDURE setCanConvertCf(flag IN boolean);

-- setDbincKey is used in lieu of setDatabase for when the SET DBID command
-- is issued.
PROCEDURE setDbincKey(
       key IN number);

-- getParentIncarnation returns the parent incarnation. If resetlogs_change#
-- is NULL on input, then the current incarnation is returned. Returns TRUE
-- if a row was returned, otherwise returns FALSE.
FUNCTION getParentIncarnation(
       resetlogs_change# IN OUT number
      ,resetlogs_time    IN OUT date) RETURN number;

-- getCheckpoint gets and returns the highest recovery catalog checkpoint SCN
-- for FULL checkpoints. This SCN indicates how current the datafilenames and
-- lognames in the recovery catalog are. This SCN can be compared with a
-- backup controlfile SCN to decide which name to use if they differ.
PROCEDURE getCheckpoint(
       scn OUT number
      ,seq OUT number);

-- This version of getCheckpoint is only used internally by
-- dbms_rcvcat.cleanupCKP, to find out which rows can't be deleted from ckp.
PROCEDURE getCheckpoint(
       scn       OUT number
      ,seq       OUT number
      ,ckp_key_1 OUT number
      ,ckp_key_2 OUT number);

-- This procedure sets the package variables to return all logs not backed up
-- ntimes to a specific device type until sbpscn (standby-became-primary SCN),
-- ignoring the needstby flag for the subsequent archivelog translations.
PROCEDURE SetGetSinceLastBackedAL(ntimes  IN number   DEFAULT 1,
                                  devtype IN varchar2 DEFAULT NULL,
                                  sbpscn  IN number);

-------------------
-- Query Filters --
-------------------

-- setCompletedRange sets completedBefore and/or completedAfter filters for
-- use by computeRecoveryActions.
-- setLikePattern sets the fileName pattern for computeRecoveryActions.
--
-- setUntilTime, setUntilScn, setUntilLog, setToLog, setUntilResetlogs,
-- resetUntil.
-- These procedures are used to inform dbms_rcvman of an until_clause.
-- The setUntil remains in effect until another setUntil has been called,
-- or until resetUntil has been called.
-- If none of these have been called, then all queries for name
-- translation, restore, and recovery should assume that a complete recovery
-- is being done. Otherwise, all restore and recovery queries should limit
-- their replies to backup sets and datafile copies that are appropriate for
-- use in an incomplete recovery until the specified until condition. Name
-- translations should be done relative to the specified epoch.
--
-- "appropriate" means that the fuzziness of the backup datafile or datafile
-- copy ends at an SCN less than the untilChange SCN (for untilChange), or the
-- low SCN of the specified log (for untilLog), or the fuzziness timestamp is
-- less than the specified time (for unttime). Note that datafiles have three
-- kinds of fuzziness, all of which must be less than the specified SCN or
-- time. If the fuzziness of a datafile is unknown, then it should be
-- ignored.
--
-- The setUntil procedures will signal an error when executed against
-- the target database controlfile. The resetUntil procedure can be
-- executed against the controlfile, but it doesn't have any effect.
-- Input parameters:
--   unttime
--     The incomplete recovery will stop when this timestamp is reached
--     in the redo log.
--   scn
--     The incomplete recovery will stop when this scn is reached in the
--     redo log.
--   sequence#, thread#
--     The incomplete recovery will stop when this log becomes the very next
--     log to be applied.
--
-- Exceptions:
--   NO_RECOVERY_CATALOG (ORA-20300)
--     this operation is not supported without the recovery catalog
--   SEQUENCE_IS_NULL (ORA-20205)
--     A null log sequence# was given
--   LOG_MISSING (ORA-20206)
--     No log with the given thread# and sequence# was found

PROCEDURE setCompletedRange(
       after  IN date
      ,before IN date);

PROCEDURE setLikePattern(
       pattern IN varchar2);

PROCEDURE setcanApplyAnyRedo(
       flag IN boolean);  -- Obsolete as of 8.1.6

PROCEDURE setAllFlag(
       flag IN boolean);

PROCEDURE setAllIncarnations(
       flag IN boolean);

PROCEDURE setUntilTime(
       unttime IN date);

-- If rlgscn and rlgtime are not provided, then the provided scn belongs to
-- the current incarnation or one of its parents. Otherwise, it should belong
-- to the given rlgscn and rlgtime.
-- If flbrp (flashback to restore point) is TRUE, then allow scn to be in an
-- orphan branch. Otherwise, we force scn to be in the current branch or one
-- of its parents.
PROCEDURE setUntilScn(
       scn     IN number
      ,rlgscn  IN number  DEFAULT NULL
      ,rlgtime IN date    DEFAULT NULL
      ,flbrp   IN boolean DEFAULT FALSE
      ,rpoint  IN boolean DEFAULT FALSE);

PROCEDURE setUntilLog(
       sequence# IN number
      ,thread#   IN number);

PROCEDURE setToLog(
       sequence# IN number
      ,thread#   IN number);

PROCEDURE setUntilResetlogs;

FUNCTION getUntilTime return date;
FUNCTION getUntilScn  return number;

PROCEDURE resetUntil;

-- setFrom is used to limit the potential restore candidates to either
-- backup sets or datafile copies, or to allow either kind of file to
-- be used.
--
-- Input parameters:
--   restorefrom
--     One of BACKUP, COPY, or NULL.
PROCEDURE setFrom(
       restorefrom IN number DEFAULT NULL);

-- setDeviceType specifies the type of an allocated device. It is called 1 or
-- more (up to 8) times, depending on the number of different device types
-- that are allocated. dbms_rcvman should return only files that can be
-- accessed through one of the device types specified through this call.
--
-- Input parameters:
--   type
--     type of the device
-- Exceptions:
--   NULL_DEVICE_TYPE
--     A null device type was specified
--   TOO_MANY_DEVICE_TYPES
--     At most 8 device types can be specified
PROCEDURE setDeviceType(
       type IN varchar2);
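-- A minimal usage sketch of the query-filter calls above, as a client might
-- issue them for an incomplete-recovery query session (the device type and
-- timestamp are illustrative only):
--
--   begin
--     dbms_rcvman.setDeviceType('SBT_TAPE');
--     dbms_rcvman.setUntilTime(
--         to_date('2011-01-01 12:00:00', 'YYYY-MM-DD HH24:MI:SS'));
--     -- ... restore / name-translation queries happen here ...
--     dbms_rcvman.resetAll;   -- clears the until/from/device/tag settings
--   end;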
PROCEDURE setStandby(
    stby IN boolean);

PROCEDURE setDeviceTypeAny;

-- resetDeviceType resets the list of device types to null.

PROCEDURE resetDeviceType;

-- setTag is used to limit the restore candidates to backups and copies with
-- the given tag. If the tag is NULL then all backups and copies are searched
-- by the find functions.
--
-- Input parameters:
--   tag
--     tag of the datafile copies to be translated.
--     The tag must be passed in uppercase ### ok?

PROCEDURE setTag(tag IN varchar2 DEFAULT NULL);

-- setRecoveryDestFile is used to limit the translation only to recovery
-- area files.
--
-- Input parameters:
--   onlyrdf
--     TRUE  - only recovery area files
--     FALSE - all files

PROCEDURE setRecoveryDestFile(onlyrdf IN BOOLEAN);

-- Set a site name in the catalog -- all translation will happen against this
-- site. The package variable will be cleared after translation.
-- If the for_realfiles parameter is non-zero, then the translation for files
-- in the working area (datafiles/onlinelogs/tempfiles) is done against
-- the requested site.

PROCEDURE setSiteName(db_unique_name IN VARCHAR2, for_realfiles IN NUMBER);

-- Clear package variables set by the setSiteName procedure

PROCEDURE clrSiteName;

-- get site name for a given site_key

FUNCTION getSiteName(site_key IN NUMBER) RETURN VARCHAR2;

-- get site key for a given db_unique_name

FUNCTION getSiteKey(db_unique_name IN VARCHAR2) RETURN NUMBER;

-- set archivelog file sharing scope attributes for the session

PROCEDURE setArchiveFileScopeAttributes(logs_shared IN NUMBER);

-- set backup file sharing scope attributes for the session

PROCEDURE setBackupFileScopeAttributes(
    disk_backups_shared IN NUMBER,
    tape_backups_shared IN NUMBER);

-- resetAll calls resetUntil, setFrom, resetDeviceType and setTag to reset
-- everything.

PROCEDURE resetAll(transclause IN BOOLEAN DEFAULT TRUE);

---------------------------
-- Backup Set Validation --
---------------------------

-- Use the findValidBackupSetRcvRec public variable to save a backupset
-- record for later use as an input argument to this procedure.

findValidBackupSetRcvRec rcvRec_t;     -- place to save a rcvRec_t

PROCEDURE findValidBackupSet(
    backupSetRec  IN rcvRec_t
   ,deviceType    IN varchar2       DEFAULT NULL
   ,tag           IN varchar2       DEFAULT NULL
   ,available     IN number         DEFAULT TRUE#   -- for compat.
   ,unavailable   IN number         DEFAULT FALSE#  -- for compat.
   ,deleted       IN number         DEFAULT FALSE#  -- for compat.
   ,expired       IN number         DEFAULT FALSE#  -- for compat.
   ,availableMask IN binary_integer DEFAULT NULL);  -- for compat.

findValidBackupSetBsRec bsRec_t;       -- place to save a bsRec_t

-- Obsolete as of 8.1.7
PROCEDURE findValidBackupSet(
    backupSetRec  IN bsRec_t
   ,deviceType    IN varchar2       DEFAULT NULL
   ,tag           IN varchar2       DEFAULT NULL
   ,available     IN number         DEFAULT TRUE#   -- for compat.
   ,unavailable   IN number         DEFAULT FALSE#  -- for compat.
   ,deleted       IN number         DEFAULT FALSE#  -- for compat.
   ,expired       IN number         DEFAULT FALSE#  -- for compat.
   ,availableMask IN binary_integer DEFAULT NULL);  -- for compat.

FUNCTION getValidBackupSet(
    validBackupSetRec      OUT NOCOPY validBackupSetRec_t
   ,checkDeviceIsAllocated IN  number DEFAULT FALSE#)
  RETURN number;   -- TRUE#  -> got a record
                   -- FALSE# -> no_data_found

---------------------
-- Get an rcvRec_t --
---------------------

-- This function is a cover function for all procedures/functions that
-- return a rcvRec_t. It routes the call to the correct procedure. It
-- is provided for the convenience of krmk.pc. The function return value
-- is whatever the underlying function returns. If we call a procedure,
-- then getRcvRec returns 0.
-- Refer to the funCode list above in the types/variables section.

FUNCTION getRcvRec(
    funCode   IN  number
   ,rcvRec    OUT NOCOPY rcvRec_t
   ,callAgain OUT number)
  RETURN number;

--------------------------
-- Datafile Translation --
--------------------------

-- translateTableSpace translates a tablespace name into a list of datafile
-- numbers. translateDataBase translates the database into a list of datafile
-- numbers in the database, excluding datafiles belonging to tablespaces
-- specified using skipTableSpace. The translation is performed relative to
-- the epoch setting currently in use. getDataFile is used to obtain the
-- datafile numbers, one at a time until null is returned.
-- When doing the translation relative to the current time, the client should
-- ensure that the recovery catalog is up-to-date. When doing translations
-- relative to a point-in-time in the past, two potential anomalies may
-- show up.
--
-- 1) files belonging to a tablespace that was dropped before the
--    point-in-time may be returned since the drop_scn and drop_time are
--    approximations. As a result, point-in-time recovery will restore and
--    recover a tablespace which will be dropped before the database is
--    opened. No real harm, just extra work for the recovery. And this won't
--    happen if the rcvcat is resynced immediately after dropping a
--    tablespace.
-- 2) A tablespace which is created and dropped between two consecutive
--    recovery catalog resyncs will never be recorded in the rcvcat. It is
--    conceivable that such a tablespace existed at the intended
--    point-in-time. As a result the tablespace will not be recovered and
--    must be dropped after the database is opened. The worst case scenario
--    is that a rollback segment was also created in this tablespace. The
--    recovered database might fail to roll back some transactions. Again,
--    this won't happen if the rcvcat is always resynced after creating a
--    tablespace.
-- PS. These anomalies won't occur if the point-in-time is chosen to be
--     a rcvcat checkpoint.
-- Input parameters:
--   ts_name
--     name of the tablespace to be translated or skipped.
--     The name must be in uppercase.
-- Exceptions:
--   TABLESPACE_DOES_NOT_EXIST (ORA-20202)
--     the tablespace to be translated does not exist (does not have any
--     datafiles). Check that the recovery catalog is current.
--   TRANSLATION_IN_PROGRESS (ORA-20203)
--     the previous translation conversation is still in progress.
--     To terminate, get all datafiles with getDataFile.
--   TRANSLATION_NOT_IN_PROGRESS (ORA-20204)
--     getDataFile was called with no translation in progress

PROCEDURE translateDatabase(
    sinceUntilSCN IN number DEFAULT NULL);

PROCEDURE skipTableSpace(
    ts_name IN varchar2);

PROCEDURE translateTablespace(
    ts_name IN varchar2);

-- translateDataFile translates the datafile name/number into
-- a datafile number, creation SCN, and filename. getDataFile must
-- be called to obtain the translation info, just as for the other translate
-- functions.
-- Unlike the other translation functions, translateDatafile by name is
-- always performed relative to the current time. If an until setting is in
-- effect, and if the filename is ambiguous, then an exception is raised.
-- Ambiguous means that the filename refers to a different datafile at the
-- until time than it does at the current time. This happens only when a
-- filename has been reused. When fno and ckpscn are passed, the filename and
-- other info as of that scn is returned.
-- Input parameters:
--   fname
--     name of the datafile to be translated.
--     The name must be a normalized filename.
--   fno
--     The datafile number. If the datafile number was not in use at the
--     until time, then an exception is raised.
-- Exceptions:
--   DATAFILE_DOES_NOT_EXIST (ORA-20201)
--     the datafile to be translated does not exist.
--     Check that the recovery catalog is current.

PROCEDURE translateDataFile(
    fname IN varchar2);

PROCEDURE translateDatafile(
    fno IN number);

PROCEDURE translateDatafile(
    fno    IN number
   ,ckpscn IN number);

-- translateAllDatafile returns a list of all datafiles that ever
-- existed in this database.

PROCEDURE translateAllDatafile;

PROCEDURE translateCorruptList;

PROCEDURE getDatafile(
    dfRec     OUT NOCOPY dfRec_t
   ,oldClient IN boolean DEFAULT FALSE);

-- Obsolete as of 8.1.6
PROCEDURE getDataFile(
    file#                 OUT number
   ,crescn                OUT number
   ,creation_time         OUT date
   ,fname                 OUT varchar2
   ,ts_name               OUT varchar2
   ,status                OUT number
   ,blksize               OUT number
   ,kbytes                OUT number
   ,blocks                OUT number
   ,unrecoverable_change# OUT number
   ,stop_change#          OUT number
   ,read_only             OUT number);

--------------------------
-- Tempfile Translation --
--------------------------

-- translateTempfile translates tempfiles known to the database in the
-- current incarnation.

PROCEDURE translateTempfile;

PROCEDURE translateTempfile(fname IN varchar2);

PROCEDURE translateTempfile(fno IN number);

-- Fetch the cursor opened by translateTempfiles and return a row one
-- at a time until all rows are returned. Signal ORA-1403 (no-data-found)
-- when there are no more rows to return.

PROCEDURE getTempfile(tfRec OUT NOCOPY tfRec_t);

----------------------------
-- Online Log Translation --
----------------------------

-- translateOnlineLogs translates the database to a list of online redo logs.
-- The translation is always performed relative to the current epoch.

PROCEDURE translateOnlineLogs(srls IN number DEFAULT 0);

PROCEDURE getOnlineLog(
    fname   OUT varchar2
   ,thread# OUT number
   ,group#  OUT number);

-----------------------------
-- Archivedlog Translation --
-----------------------------

-- translateArchivedLogKey translates the archived log key to an archived
-- log recid and stamp in V$ARCHIVED_LOG.
-- translateArchivedLogRange* procedures translate a specified
-- archivelog range to a list of archived logs.
-- getArchivedLog is used to get the recid and stamp for each archived log,
-- one at a time until null is returned.
-- The available, unavailable and deleted parameters are used to limit
-- the translation to archived logs with the desired status. For example,
-- only available archived logs can be backed up, but unavailable and deleted
-- archived logs can be restored from backups.
-- The duplicates parameter controls whether the translation returns all
-- archived logs or eliminates duplicate ones. Archived logs that have the
-- same thread#, sequence# and low_scn are considered duplicates. (Duplicate
-- archived logs are usually created by copying archived logs.)
-- Note that only archived logs recorded in the recovery catalog or
-- controlfile are returned. If there is an archived log that belongs
-- to the range but is not known, there will be a "hole" in the range.
-- Input parameters:
--   al_key
--     key of the archived log record in the recovery catalog
--   thread#
--     return only logs that belong to this thread#;
--     if NULL return logs for all threads
--   fromseq#
--     lowest log sequence number in the range
--   toseq#
--     highest log sequence number in the range
--   fromtime
--     exclude logs that were switched out before fromtime
--   totime
--     exclude logs that were switched in after totime
--   fromscn
--     exclude logs that were switched out before fromscn
--   toscn
--     exclude logs that were switched in after toscn
--   pattern
--     return only archived logs whose filename matches the pattern.
--     The pattern is matched against normalized filenames ### ok?
--   available
--     if TRUE (1) return available archived logs
--   unavailable
--     if TRUE (1) return unavailable archived logs
--   deleted
--     if TRUE (1) return deleted archived logs
--   online
--     if TRUE (1) also return inspected online logs (in addition to
--     archived logs)
--   duplicates
--     if TRUE (1) return all archived logs
--     if FALSE (0) eliminate duplicate archived logs
-- Output parameters:
--   recid
--     recid of the archived log record (in V$ARCHIVED_LOG)
--   stamp
--     stamp of the archived log record (in V$ARCHIVED_LOG)
--   thread#
--   sequence#
--   low_scn
--   fname
--   reset_scn
--   block_size
-- Exceptions:
--   NO_RECOVERY_CATALOG (ORA-20300)
--     this operation is not supported without the recovery catalog
--   ARCHIVED_LOG_DOES_NOT_EXIST
--     the key does not match any archived log
--   TRANSLATION_IN_PROGRESS (ORA-20203)
--     the previous translation conversation is still in progress.
--     To terminate, get all archived logs with getArchivedLog.
--   TRANSLATION_NOT_IN_PROGRESS (ORA-20204)
--     getArchivedLog was called with no translation in progress
--   THREAD_IS_NULL (ORA-20210)
--     a null thread# was passed to translateArchivedLogSeqRange
--   HIGH_SEQUENCE_IS_NULL
--     a null toseq# was passed to translateArchivedLogSeqRange
--   UNTIL_TIME_IS_NULL (ORA-20212)
--     a null totime was passed to translateArchivedLogTimeRange
--   UNTIL_SCN_IS_NULL (ORA-20213)
--     a null toscn was passed to translateArchivedLogSCNRange
--   ARCHIVED_LOG_RANGE_IS_EMPTY
--     the specified range doesn't contain any archived logs

------------------------------
-- Archived Log Translation --
------------------------------

PROCEDURE getArchivedLog(
    alRec       OUT NOCOPY alRec_t
   ,closeCursor IN boolean DEFAULT FALSE);

PROCEDURE translateArchivedLogKey(
    al_key      IN  number
   ,available   IN  number DEFAULT 1   -- ignored (for compatibility)
   ,unavailable IN  number DEFAULT 1   -- ignored (for compatibility)
   ,deleted     IN  number DEFAULT 1   -- ignored (for compatibility)
   ,online      IN  number DEFAULT 1   -- ignored (for compatibility)
   ,recid       OUT number
   ,stamp       OUT number
   ,thread#     OUT number
   ,sequence#   OUT number
   ,low_scn     OUT number
   ,reset_scn   OUT number
   ,block_size  OUT number
   ,fname       OUT varchar2
   ,needstby    IN  number DEFAULT NULL);

PROCEDURE translateArchivedLogName(
    fname       IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,deleted     IN number DEFAULT NULL          -- for compatibility
   ,online      IN number                       -- ignored
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,needstby    IN number DEFAULT NULL);        -- for compatibility

-- For translate functions, the incarn parameter is interpreted as:
--   -1    -- current incarnation
--   0     -- any incarnation
--   other -- a specific incarnation number
--   NULL  -- should be defaulted

PROCEDURE translateArchivedLogSeqRange(
    thread#     IN number
   ,fromseq#    IN number
   ,toseq#      IN number
   ,pattern     IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,deleted     IN number DEFAULT NULL          -- for compatibility
   ,online      IN number                       -- ignored
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,needstby    IN number DEFAULT NULL          -- for compatibility
   ,foreignal   IN binary_integer DEFAULT 0     -- for compatibility
   ,incarn      IN number DEFAULT NULL);        -- for compatibility

PROCEDURE translateArchivedLogTimeRange(
    thread#     IN number
   ,fromTime    IN date
   ,toTime      IN date
   ,pattern     IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,deleted     IN number DEFAULT NULL          -- for compatibility
   ,online      IN number                       -- ignored
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,needstby    IN number DEFAULT NULL          -- for compatibility
   ,foreignal   IN binary_integer DEFAULT 0     -- for compatibility
   ,incarn      IN number DEFAULT NULL);        -- for compatibility

PROCEDURE translateArchivedLogSCNRange(
    thread#       IN number
   ,fromSCN       IN number
   ,toSCN         IN number
   ,pattern       IN varchar2
   ,available     IN number DEFAULT NULL          -- for compatibility
   ,unavailable   IN number DEFAULT NULL          -- for compatibility
   ,deleted       IN number DEFAULT NULL          -- for compatibility
   ,online        IN number
   ,duplicates    IN number
   ,statusMask    IN binary_integer DEFAULT NULL  -- for compatibility
   ,needstby      IN number DEFAULT NULL
   ,doingRecovery IN number DEFAULT FALSE#
   ,onlyrdf       IN binary_integer DEFAULT 0     -- for compatibility
   ,reset_scn     IN number DEFAULT NULL          -- for compatibility
   ,reset_time    IN date DEFAULT NULL            -- for compatibility
   ,sequence#     IN number DEFAULT NULL          -- for compatibility
   ,foreignal     IN binary_integer DEFAULT 0     -- for compatibility
   ,incarn        IN number DEFAULT NULL);        -- for compatibility

PROCEDURE translateArchivedLogPattern(
    pattern     IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,deleted     IN number DEFAULT NULL          -- for compatibility
   ,online      IN number                       -- ignored
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,needstby    IN number DEFAULT NULL          -- for compatibility
   ,foreignal   IN binary_integer DEFAULT 0);   -- for compatibility

PROCEDURE translateArchivedLogCancel;

-- Set/Get filter functions for job views

PROCEDURE sv_setSessionKey(skey IN NUMBER);

PROCEDURE sv_setSessionTimeRange(fromTime IN DATE, untilTime IN DATE);

FUNCTION sv_getSessionKey RETURN NUMBER;

FUNCTION sv_getSessionfromTimeRange RETURN DATE;

FUNCTION sv_getSessionUntilTimeRange RETURN DATE;

-- translateBackupPieceKey looks up a backup piece by primary key.
-- translateBackupPieceHandle looks up a backup piece by handle and
-- deviceType.
-- translateBackupPieceTag looks up backup pieces by tag.
-- The available and unavailable parameters are used to limit the translation
-- to backup pieces with the desired status. For example, only available
-- backup pieces can be backed up, but unavailable pieces can be made
-- available. Deleted backup pieces are never returned.
-- Input parameters:
--   bp_key
--     key of the backup piece record in the recovery catalog
--   handle
--     backup piece handle
--   device type
--     device type on which the backup piece resides
-- Exceptions:
--   NO_RECOVERY_CATALOG (ORA-20300)
--     this operation is not supported without the recovery catalog
--   BACKUP_PIECE_DOES_NOT_EXIST
--     the key does not match any backup piece
--   BACKUP_PIECE_HANDLE_IS_AMBIGUOUS
--     the handle matches more than one backup piece

-- Obsolete as of 8.1.6
PROCEDURE getArchivedLog(
    recid      OUT number
   ,stamp      OUT number
   ,thread#    OUT number
   ,sequence#  OUT number
   ,low_scn    OUT number
   ,nxt_scn    OUT number
   ,fname      OUT varchar2
   ,reset_scn  OUT number
   ,block_size OUT number
   ,blocks     OUT number);

---------------------------------
-- Controlfilecopy Translation --
---------------------------------

-- translateControlFileCopyName translates a control file name into a list of
-- control file copies.
-- Input parameters:
--   fname
--     name of the controlfile copy to be translated.
--     The name must be a normalized filename.
-- Exceptions:
--   CONTROLFILE_COPY_DOES_NOT_EXIST
--     The filename does not match any controlfile copy

PROCEDURE translateControlFileCopyName(
    fname       IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,onlyone     IN number DEFAULT 1);

PROCEDURE translateControlFileCopyTag(
    cftag       IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,onlyone     IN number DEFAULT 1);

PROCEDURE translateControlFileCopyKey(
    key         IN number
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

PROCEDURE getControlFileCopy(
    rcvRec IN OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
PROCEDURE getControlFileCopy(
    recid      OUT number
   ,stamp      OUT number
   ,reset_scn  OUT number
   ,ckp_scn    OUT number
   ,block_size OUT number);

------------------------------
-- Datafilecopy Translation --
------------------------------

PROCEDURE getDataFileCopy(
    rcvRec      OUT NOCOPY rcvRec_t
   ,closeCursor IN boolean DEFAULT FALSE);

-- translateDataFileCopyKey translates the datafile copy key into a
-- datafile copy recid and stamp in V$DATAFILE_COPY.
-- translateDataFileCopyNumber translates a file number and (optional) tag
-- to a datafile copy recid and stamp. Not used currently in 8.0.
-- translateDataFileCopyName translates the datafile copy name into a
-- list of datafile copies, and getDataFileCopy returns the recid and stamp
-- of each datafile copy. The duplicates parameter controls whether
-- getDataFileCopy returns all matching datafile copies or just the most
-- recent copy (highest stamp in rcvcat or highest recid in controlfile).
-- translateDataFileCopyTag translates the tag into a list of datafile
-- copies, and getDataFileCopy returns the recid and stamp of each datafile
-- copy one at a time until null is returned.
-- translateDataFileCopyFno translates a file number into a list of datafile
-- copies. getDataFileCopy returns the recid and stamp of each datafile
-- copy one at a time until null is returned. The duplicates parameter
-- controls whether getDataFileCopy returns all matching datafile copies or
-- just the most recent copy (highest stamp in rcvcat or highest recid in
-- controlfile).
-- The available and unavailable parameters are used to limit the translation
-- to datafile copies with the desired status. For example, only available
-- datafile copies can be backed up, but unavailable copies can be made
-- available. Deleted copies are never returned.
-- The duplicates parameter controls whether getDataFileCopy returns all
-- datafile copies or just the most recent (highest checkpoint scn) copy
-- of each datafile (file#).
-- Input parameters:
--   cdf_key
--     key of the datafile copy record in the recovery catalog
--   fname
--     name of the datafile copy to be translated.
--     The name must be a normalized filename.
--   tag
--     tag of the datafile copies to be translated.
--     The tag must be passed exactly as stored in the controlfile;
--     it is not uppercased by translate.
--   available
--     if TRUE (1) return available datafile copies
--   unavailable
--     if TRUE (1) return unavailable datafile copies
--   duplicates
--     if TRUE (1) return all datafile copies
--     if FALSE (0) eliminate duplicate datafile copies
--
-- The remaining parameters are returned for deleteDataFileCopy
--
--   file#
--   fname
--   reset_scn
--   create_scn
--   ckp_scn
--   block_size
--
-- Exceptions:
--   NO_RECOVERY_CATALOG (ORA-20300)
--     translation by key is not supported without the recovery catalog
--   DATAFILE_COPY_DOES_NOT_EXIST
--     the specified key or filename does not match any datafile copy
--   DATAFILE_COPY_NAME_AMBIGUOUS
--     the specified filename matches more than one datafile copy
--   TAG_DOES_NOT_MATCH
--     the specified tag doesn't match any datafile copies
--   TRANSLATION_IN_PROGRESS (ORA-20203)
--     the previous translation conversation is still in progress.
--     To terminate, get all datafile copies with getDataFileCopy.
--   TRANSLATION_NOT_IN_PROGRESS (ORA-20204)
--     getDataFileCopy was called with no translation in progress

PROCEDURE translateDataFileCopyKey(
    cdf_key     IN number
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- Obsolete as of 8.1.6
PROCEDURE translateDataFileCopyKey(
    cdf_key     IN  number
   ,available   IN  number
   ,unavailable IN  number
   ,recid       OUT number
   ,stamp       OUT number
   ,file#       OUT number
   ,fname       OUT varchar2
   ,reset_scn   OUT number
   ,create_scn  OUT number
   ,ckp_scn     OUT number
   ,block_size  OUT number
   ,blocks      OUT number);

PROCEDURE translateDataFileCopyName(
    fname       IN varchar2
   ,available   IN number DEFAULT NULL          -- for compatibility
   ,unavailable IN number DEFAULT NULL          -- for compatibility
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL  -- for compatibility
   ,onlyone     IN number DEFAULT 1
   ,pluginSCN   IN number DEFAULT 0);

PROCEDURE translateDataFileCopyTag(
    tag         IN varchar2
   ,available   IN number DEFAULT NULL             -- for compatibility
   ,unavailable IN number DEFAULT NULL             -- for compatibility
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL     -- for compatibility
   ,pluginSCN   IN number DEFAULT 0
   ,onlytc      IN binary_integer DEFAULT FALSE#); -- for compatibility

PROCEDURE translateDataFileCopyFno(
    fno         IN number
   ,available   IN number DEFAULT NULL
   ,unavailable IN number DEFAULT NULL
   ,duplicates  IN number
   ,statusMask  IN binary_integer DEFAULT NULL
   ,pluginSCN   IN number DEFAULT 0);

PROCEDURE translateDataFileCopy(
    duplicates IN number
   ,statusMask IN binary_integer
   ,onlyrdf    IN binary_integer
   ,pluginSCN  IN number DEFAULT 0);

-- Bug 2391697
PROCEDURE translateDatafileCancel;

-- Obsolete as of 8.1.6
PROCEDURE getDataFileCopy(
    recid      OUT number
   ,stamp      OUT number
   ,file#      OUT number
   ,fname      OUT varchar2
   ,reset_scn  OUT number
   ,create_scn OUT number
   ,ckp_scn    OUT number
   ,block_size OUT number
   ,blocks     OUT number);

----------------------------
-- Proxy Copy Translation --
----------------------------

PROCEDURE getProxyCopy(
    rcvRec      OUT NOCOPY rcvRec_t
   ,closeCursor IN boolean DEFAULT FALSE);

PROCEDURE translateProxyCopyKey(
    pc_key      IN number
   ,deviceType  IN varchar2
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,deleted     IN number DEFAULT NULL           -- for compatibility
   ,expired     IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- Obsolete as of 8.1.6
PROCEDURE translateProxyCopyKey(
    pc_key      IN  number
   ,device_type IN  varchar2
   ,available   IN  number
   ,unavailable IN  number
   ,deleted     IN  number
   ,recid       OUT number
   ,stamp       OUT number
   ,handle      OUT varchar2);

PROCEDURE translateProxyCopyHandle(
    handle      IN varchar2
   ,deviceType  IN varchar2
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,deleted     IN number DEFAULT NULL           -- for compatibility
   ,expired     IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- Obsolete as of 8.1.6
PROCEDURE translateProxyCopyHandle(
    handle      IN  varchar2
   ,device_type IN  varchar2
   ,available   IN  number
   ,unavailable IN  number
   ,deleted     IN  number
   ,recid       OUT number
   ,stamp       OUT number);

PROCEDURE translateProxyCopyTag(
    tag         IN varchar2
   ,device_type IN varchar2
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,deleted     IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- translateProxyCopyKey translates a proxy copy key to a
-- recid and stamp in V$PROXY_DATAFILE/V$PROXY_ARCHIVEDLOG.
-- translateProxyCopyHandle translates a handle and device type to a
-- proxy copy recid and stamp.
-- getProxyCopy returns one proxy copy after calling translateProxyCopyTag;
-- keep calling getProxyCopy until recid is null.
-- The available and unavailable parameters are used to limit the
-- translation to proxy copies with the desired status.
-- Input parameters:
--   pc_key
--     key of the proxy copy record in the recovery catalog
--   handle
--     proxy copy handle
--   device type
--     device type on which the proxy copy resides
-- Output parameters:
--   recid
--     recid/stamp of the proxy copy record (in V$PROXY_DATAFILE or
--     V$PROXY_ARCHIVEDLOG)
-- Exceptions:
--   NO_RECOVERY_CATALOG (ORA-20300)
--     this operation is not supported without the recovery catalog
--   PROXY_COPY_DOES_NOT_EXIST
--     the key does not match any proxy copy
--   PROXY_COPY_HANDLE_IS_AMBIGUOUS
--     the key matches more than one proxy copy

-- Obsolete as of 8.1.6
PROCEDURE getProxyCopy(
    recid  OUT number
   ,stamp  OUT number
   ,handle OUT varchar2);

------------------------------
-- Backup Piece Translation --
------------------------------

PROCEDURE getBackupPiece(
    bpRec       OUT NOCOPY bpRec_t
   ,closeCursor IN boolean DEFAULT FALSE);

PROCEDURE translateBackupPieceKey(
    key         IN number
   ,available   IN number DEFAULT TRUE#
   ,unavailable IN number DEFAULT TRUE#
   ,expired     IN number DEFAULT TRUE#
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- only used in 8.1.6
PROCEDURE translateBackupPieceKey(
    bp_key      IN  number
   ,available   IN  number
   ,unavailable IN  number
   ,recid       OUT number
   ,stamp       OUT number
   ,handle      OUT varchar2
   ,set_stamp   OUT number
   ,set_count   OUT number
   ,piece#      OUT number);

PROCEDURE translateBackupPieceHandle(
    handle      IN varchar2
   ,deviceType  IN varchar2
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,expired     IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

-- only used in 8.1.6
PROCEDURE translateBackupPieceHandle(
    handle      IN  varchar2
   ,device_type IN  varchar2
   ,available   IN  number
   ,unavailable IN  number
   ,recid       OUT number
   ,stamp       OUT number
   ,set_stamp   OUT number
   ,set_count   OUT number
   ,piece#      OUT number);

PROCEDURE translateBackupPieceTag(
    tag         IN varchar2
   ,available   IN number DEFAULT NULL           -- for compatibility
   ,unavailable IN number DEFAULT NULL           -- for compatibility
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

PROCEDURE translateBackupPieceBSKey(
    key         IN number
   ,tag         IN varchar2 DEFAULT NULL
   ,deviceType  IN varchar2 DEFAULT NULL
   ,pieceCount  IN number
   ,duplicates  IN number DEFAULT TRUE#
   ,copyNumber  IN number DEFAULT NULL
   ,available   IN number DEFAULT TRUE#
   ,unavailable IN number DEFAULT FALSE#
   ,deleted     IN number DEFAULT FALSE#
   ,expired     IN number DEFAULT FALSE#
   ,statusMask  IN binary_integer DEFAULT NULL); -- for compatibility

PROCEDURE translateBackupPieceBsKey(
    startBsKey IN number
   ,tag        IN varchar2 DEFAULT NULL
   ,statusMask IN binary_integer DEFAULT NULL);
-- Translates all backupsets starting with the specified backupset key, tag
-- and status. Used to fetch a list of backuppieces in one cursor.

PROCEDURE translateSeekBpBsKey(
    bsKey      IN number
   ,deviceType IN varchar2
   ,pieceCount IN number
   ,duplicates IN number DEFAULT TRUE#
   ,copyNumber IN number DEFAULT NULL);
-- Seek follows translateBackupPieceBsKey. It is used to seek to a specified
-- backupset key, device and copyNumber. It may return no_data_found if the
-- backupset key is not found or the cursor has passed over the given
-- backupset key, device and copyNumber.
-- If this call doesn't return an error, then use getBackupPiece to
-- fetch all backuppieces until no_data_found.
-- Then seek again for a backupset key that is greater than the previous one.
-- The seek is expected to always succeed if the backupset keys are fed
-- in ascending order. A commented sketch follows.
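-- A minimal, hypothetical sketch of the above conversation (the key and
-- device values are illustrative; end-of-fetch via no_data_found is as
-- described above):
--
--   DECLARE
--      bp dbms_rcvman.bpRec_t;
--   BEGIN
--      dbms_rcvman.translateBackupPieceBsKey(startBsKey => 1);
--      dbms_rcvman.translateSeekBpBsKey(bsKey      => 42,     -- example key
--                                       deviceType => 'DISK',
--                                       pieceCount => 1);
--      BEGIN
--         LOOP
--            dbms_rcvman.getBackupPiece(bp);    -- fetch the next piece
--            -- ... process bp here ...
--         END LOOP;
--      EXCEPTION
--         WHEN no_data_found THEN NULL;         -- all pieces fetched
--      END;
--      dbms_rcvman.translateBpBsKeyCancel;      -- close cursor, reset state
--   END;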
PROCEDURE translateBpBsKeyCancel;
-- End the translation once you are done with fetching the pieces of all
-- backupsets. This will close the cursor and reset the package translation
-- variables.

-- Obsolete as of 8.1.6
PROCEDURE translateBackupSetKey(
    bs_key          IN  number
   ,device_type     IN  varchar2
   ,available       IN  number
   ,unavailable     IN  number
   ,deleted         IN  number
   ,duplicates      IN  number
   ,backup_type     OUT varchar2
   ,recid           OUT number
   ,stamp           OUT number
   ,set_stamp       OUT number
   ,set_count       OUT number
   ,bslevel         OUT number
   ,completion_time OUT date);

-- Obsolete as of 8.1
PROCEDURE translateBackupSetKey(
    bs_key      IN  number
   ,device_type IN  varchar2
   ,available   IN  number
   ,unavailable IN  number
   ,deleted     IN  number
   ,duplicates  IN  number
   ,backup_type OUT varchar2
   ,recid       OUT number
   ,stamp       OUT number);

-- Obsolete as of 8.1.6
PROCEDURE translateBackupSetRecid(
    recid       IN  number
   ,stamp       IN  number
   ,device_type IN  varchar2
   ,bs_key      OUT number
   ,bslevel     OUT number
   ,completed   OUT date);

-- Obsolete as of 8.1
PROCEDURE translateBackupSetRecid(
    recid       IN number
   ,stamp       IN number
   ,device_type IN varchar2);

-- translateBackupPieceBSKey translates the specified backup set into a list
-- of backup pieces. If there are multiple available copies of a piece then
-- only the latest (with the highest stamp) is returned. If there is no
-- available copy of a piece then an exception is raised.
--
-- Input parameters:
--   key
--     key of the backup set record in the recovery catalog
--   recid
--     recid of the backup set record (in V$BACKUP_SET)
--   stamp
--     stamp of the backup set record (in V$BACKUP_SET)
--   startBsKey
--     translate all backupsets with this key and above
--   tag
--     translate backuppieces with this tag
--   statusMask
--     translate backuppieces with this status
--   deviceType
--     translate backuppieces that reside on this device
-- Exceptions:
--   BACKUP_SET_MISSING
--     no backup set with the specified recid and stamp was found
--   NO_RECOVERY_CATALOG (ORA-20300)
--     translation by bs_key is not supported without the recovery catalog

-- Obsolete as of 8.1.6
PROCEDURE getBackupPiece(
    recid      OUT number
   ,stamp      OUT number
   ,bpkey      OUT number
   ,set_stamp  OUT number
   ,set_count  OUT number
   ,piece#     OUT number
   ,copy#      OUT number
   ,status     OUT varchar2
   ,completion OUT date
   ,handle     OUT varchar2);

-- Obsolete as of 8.1
PROCEDURE getBackupPiece(
    recid     OUT number
   ,stamp     OUT number
   ,set_stamp OUT number
   ,set_count OUT number
   ,piece#    OUT number
   ,handle    OUT varchar2);

----------------------------
-- Backup Set Translation --
----------------------------

PROCEDURE translateBackupSetKey(
    key   IN  number
   ,bsRec OUT NOCOPY bsRec_t);

PROCEDURE translateAllBackupSet(
    backupType      IN binary_integer
   ,tag             IN varchar2
   ,statusMask      IN binary_integer
   ,completedAfter  IN date
   ,completedBefore IN date
   ,onlyrdf         IN binary_integer DEFAULT 0);

PROCEDURE getAllBackupSet(
    rcvRec OUT NOCOPY rcvRec_t);

------------------------
-- Controlfile Backup --
------------------------

-- allCopies = TRUE fetches duplexed copies
PROCEDURE findControlfileBackup(
    allCopies IN boolean default FALSE);

-- getControlfileBackup is not a public function, but needs to be here due
-- to bug 1269570.
FUNCTION getControlfileBackup(
    rcvRec OUT NOCOPY rcvRec_t)
  RETURN number;

-- getPrimaryDfName: return the name of a datafile as it appears on the
-- primary
FUNCTION getPrimaryDfName(fno IN NUMBER) RETURN VARCHAR2;

-- findControlFileBackup finds the optimal copy or backup of the controlfile
-- based on the given criteria.
-- The optimal copy is the one with the highest checkpoint SCN. Returns one
-- of: SUCCESS, AVAILABLE, UNAVAILABLE. A commented usage sketch follows.
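-- A minimal, hypothetical sketch (it assumes the return value is interpreted
-- exactly as described above; nothing else here is mandated by the package):
--
--   DECLARE
--      rec dbms_rcvman.rcvRec_t;
--      rc  number;
--   BEGIN
--      dbms_rcvman.findControlfileBackup(allCopies => FALSE);
--      rc := dbms_rcvman.getControlfileBackup(rec);
--      IF rc = dbms_rcvman.SUCCESS THEN
--         NULL;   -- rec describes the optimal controlfile backup/copy
--      END IF;
--   END;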
-- This is for 8.0.4 thru 8.1.5 compatibility
FUNCTION findControlFileBackup(
    type        OUT number
   ,recid       OUT number
   ,stamp       OUT number
   ,fname       OUT varchar2
   ,device_type OUT varchar2
   ,ckp_scn     OUT number)
  RETURN number;

-- Obsolete as of 8.1.6 (8.1.5 uses this)
FUNCTION findControlFileBackup(
    type        OUT number
   ,recid       OUT number
   ,stamp       OUT number
   ,fname       OUT varchar2
   ,device_type OUT varchar2
   ,ckp_scn     OUT number
   ,rlg_scn     OUT number
   ,blksize     OUT number)
  RETURN number;

-------------------------
-- Archived Log Backup --
-------------------------

PROCEDURE findRangeArchivedLogBackup(
    minthread   IN number
   ,minsequence IN number
   ,minlowSCN   IN number
   ,maxthread   IN number
   ,maxsequence IN number
   ,maxlowSCN   IN number
   ,allCopies   IN boolean default FALSE);
-- findRangeArchivedLogBackup finds all the backup sets that are required to
-- restore the archivelogs specified in the range.
-- getRangeArchivedLogBackup returns the record for the backup set. The
-- return value is one of: SUCCESS, AVAILABLE, UNAVAILABLE for each of the
-- backup sets.
--
-- Input parameters:
--   minthread#, maxthread#     - range of threads
--   minsequence#, maxsequence# - range of sequence#
--   minlowSCN, maxlowSCN       - range of lowSCN
--   allCopies                  - TRUE fetches duplexed copies

-- Obsolete as of 11g
PROCEDURE findArchivedLogBackup(
    thread    IN number
   ,sequence  IN number
   ,lowSCN    IN number
   ,allCopies IN boolean default FALSE);
-- findArchivedLogBackup finds a backup set containing the given archived
-- log. getArchivedLogBackup returns the record for the backup set. The
-- return value is one of: SUCCESS, AVAILABLE, UNAVAILABLE.
--
-- Input parameters:
--   thread#
--   sequence#
--   low_scn
--   allCopies - TRUE fetches duplexed copies

-- Obsolete as of 11g
FUNCTION getArchivedLogBackup(
    rcvRec OUT NOCOPY rcvRec_t)
  RETURN binary_integer;

-- Obsolete as of 8.1.6
FUNCTION findArchivedLogBackup(
    thread#     IN  number
   ,sequence#   IN  number
   ,low_scn     IN  number
   ,type        OUT number
   ,recid       OUT number
   ,stamp       OUT number
   ,device_type OUT varchar2)
  RETURN number;

-------------------
-- SPFILE Backup --
-------------------

-- allCopies = TRUE fetches duplexed copies.
-- redundancy determines the number of redundant copies to fetch.
-- rmanCmd is the specific rman command.
-- scn_warn = 1 if we must estimate the time, 0 otherwise.
PROCEDURE findSpfileBackup(
    allCopies  IN boolean default FALSE
   ,redundancy IN number  default NULL
   ,rmanCmd    IN number  default unknownCmd_t);

PROCEDURE findSpfileBackup(
    allCopies  IN  boolean default FALSE
   ,redundancy IN  number  default NULL
   ,rmanCmd    IN  number  default unknownCmd_t
   ,scn_warn   OUT number);

-- redundancy determines the number of redundant copies to fetch if
-- findSpfileBackup wasn't called earlier. A commented sketch follows.
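-- A minimal, hypothetical sketch of the spfile lookup conversation (the
-- parameter values are illustrative only):
--
--   DECLARE
--      rec  dbms_rcvman.rcvRec_t;
--      warn number;
--      rc   number;
--   BEGIN
--      dbms_rcvman.findSpfileBackup(allCopies  => FALSE,
--                                   redundancy => 1,
--                                   rmanCmd    => dbms_rcvman.unknownCmd_t,
--                                   scn_warn   => warn);
--      rc := dbms_rcvman.getSpfileBackup(rec);  -- rec describes the backup
--   END;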
FUNCTION getSpfileBackup(
    rcvRec     OUT NOCOPY rcvRec_t
   ,redundancy IN number default NULL
   ,rmanCmd    IN number default unknownCmd_t)
  RETURN number;

---------------
-- List Copy --
---------------

PROCEDURE listTranslateControlfileCopy(
    tag             IN varchar2
   ,completedAfter  IN date
   ,completedBefore IN date
   ,statusMask      IN binary_integer
                       DEFAULT BSavailable+BSunavailable+BSexpired
   ,liststby        IN binary_integer DEFAULT NULL   -- default for 8.1
   ,file_pattern    IN varchar2 DEFAULT NULL);

PROCEDURE listGetControlfileCopy(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetControlfileCopy(
    bcfkey     OUT number
   ,ckpscn     OUT number
   ,ckptime    OUT date
   ,status     OUT varchar2
   ,completion OUT date
   ,fname      OUT varchar2)
  RETURN number;

PROCEDURE listTranslateDataFileCopy(
    file#             IN number
   ,creation_change#  IN number
   ,tag               IN varchar2 DEFAULT NULL
   ,file_name_pattern IN varchar2 DEFAULT NULL
   ,completedAfter    IN date DEFAULT NULL
   ,completedBefore   IN date DEFAULT NULL
   ,statusMask        IN binary_integer
                         DEFAULT BSavailable+BSunavailable -- default for 8.1
   ,pluginSCN         IN number DEFAULT 0);

PROCEDURE listGetDataFileCopy(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetDataFileCopy(
    cdf_key            OUT number
   ,status             OUT varchar2
   ,fname              OUT varchar2
   ,completion_time    OUT date
   ,checkpoint_change# OUT number
   ,checkpoint_time    OUT date)
  RETURN number;

PROCEDURE listTranslateArchivedLogCopy(
    thread#           IN number
   ,sequence#         IN number
   ,first_change#     IN number
   ,file_name_pattern IN varchar2 DEFAULT NULL
   ,completedAfter    IN date DEFAULT NULL
   ,completedBefore   IN date DEFAULT NULL
   ,statusMask        IN binary_integer
                         DEFAULT BSavailable+BSunavailable+BSexpired
                                                    -- 8.0/8.1 defaults
   ,needstby          IN number DEFAULT NULL);

PROCEDURE listGetArchivedLogCopy(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetArchivedLogCopy(
    al_key          OUT number
   ,status          OUT varchar2
   ,fname           OUT varchar2
   ,completion_time OUT date)
  RETURN number;

-----------------
-- List Backup --
-----------------

PROCEDURE listTranslateControlfileBackup(
    tag             IN varchar2
   ,completedAfter  IN date
   ,completedBefore IN date
   ,statusMask      IN binary_integer
                       DEFAULT BSavailable+BSunavailable+BSexpired
                                                    -- 8.0/8.1 defaults
   ,autobackup      IN binary_integer DEFAULT BScfile_all
   ,liststby        IN binary_integer DEFAULT NULL);

PROCEDURE listGetControlfileBackup(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetControlfileBackup(
    bskey   OUT number
   ,ckpscn  OUT number
   ,ckptime OUT date)
  RETURN number;

PROCEDURE listTranslateSpfileBackup(
    completedAfter  IN date
   ,completedBefore IN date);

PROCEDURE listGetSpfileBackup(
    rcvRec OUT NOCOPY rcvRec_t);

PROCEDURE listTranslateDataFileBackup(
    file#            IN number
   ,creation_change# IN number
   ,tag              IN varchar2 DEFAULT NULL
   ,completedAfter   IN date DEFAULT NULL
   ,completedBefore  IN date DEFAULT NULL
   ,statusMask       IN binary_integer
                        DEFAULT BSavailable+BSunavailable+BSexpired
                                                    -- 8.0/8.1 defaults
   ,pluginSCN        IN number DEFAULT 0);

PROCEDURE listGetDataFileBackup(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetDataFileBackup(
    bs_key             OUT number
   ,backup_type        OUT varchar2
   ,incremental_level  OUT number
   ,completion_time    OUT date
   ,checkpoint_change# OUT number
   ,checkpoint_time    OUT date)
  RETURN number;

-- 8.1.5 LIST implementation
PROCEDURE translateBackupFile(
    bs_recid    IN  number
   ,bs_stamp    IN  number
   ,fno         IN  number
   ,bskey       OUT number
   ,inclevel    OUT number
   ,backup_type OUT varchar2
   ,completed   OUT date);

-- Used by 8.0 and 8.1.6, but not 8.1.5
PROCEDURE listTranslateArchivedLogBackup(
    thread#       IN number
   ,sequence#     IN number
   ,first_change# IN number
   ,completedAfter  IN date DEFAULT NULL
   ,completedBefore IN date DEFAULT NULL
   ,statusMask      IN binary_integer
                       DEFAULT BSavailable+BSunavailable+BSexpired);
                                                    -- 8.0/8.1 defaults

PROCEDURE listGetArchivedLogBackup(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1
FUNCTION listGetArchivedLogBackup(
    bs_key          OUT number
   ,completion_time OUT date)
  RETURN number;

-- Obsolete as of 8.1.6, but used in 9.0
PROCEDURE listTranslateArchivedLogBackup(
    thread#    IN number DEFAULT NULL
   ,lowseq     IN number DEFAULT NULL
   ,highseq    IN number DEFAULT NULL
   ,lowscn     IN number DEFAULT NULL
   ,highscn    IN number DEFAULT NULL
   ,from_time  IN date DEFAULT NULL
   ,until_time IN date DEFAULT NULL
   ,pattern    IN varchar2 DEFAULT NULL);

-- Obsolete as of 8.1.6
FUNCTION listGetArchivedLogBackup(
    bs_key        OUT number
   ,thread#       OUT number
   ,sequence#     OUT number
   ,first_change# OUT number
   ,next_change#  OUT number
   ,first_time    OUT date
   ,next_time     OUT date)
  RETURN number;

--------------------
-- List Backupset --
--------------------

PROCEDURE listTranslateBackupsetFiles(
    bs_key IN number);

PROCEDURE listGetBackupsetFiles(
    rcvRec OUT NOCOPY rcvRec_t);

---------------------
-- List Proxy Copy --
---------------------

-- Note that this is used for both datafiles and the controlfile
PROCEDURE listTranslateProxyDataFile(
    file#            IN number
   ,creation_change# IN number
   ,tag              IN varchar2 DEFAULT NULL
   ,handle_pattern   IN varchar2 DEFAULT NULL
   ,completedAfter   IN date DEFAULT NULL
   ,completedBefore  IN date DEFAULT NULL
   ,statusMask       IN binary_integer
                        DEFAULT BSavailable+BSunavailable+BSexpired
   ,liststby         IN binary_integer DEFAULT NULL -- default for 8.1
   ,pluginSCN        IN number DEFAULT 0);

PROCEDURE listGetProxyDataFile(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION listGetProxyDataFile(
    xdf_key            OUT number
   ,recid              OUT number
   ,stamp              OUT number
   ,status             OUT varchar2
   ,handle             OUT varchar2
   ,completion_time    OUT date
   ,checkpoint_change# OUT number
   ,checkpoint_time    OUT date)
  RETURN number;

-- This procedure serves absolutely no purpose. It is here only for
-- backwards compatibility with 8.1.5. The only call to this is from
-- krmkafs(), which gets called from krmkgra(). Since the calls are always
-- in sequence, we can simply save the last record returned from
-- getRecoveryAction and avoid doing an extra query.
-- The only value this function returns that krmkgra() didn't already have
-- in 8.1.5 is the xdf_key. Completion time was being estimated from the
-- stamp.
PROCEDURE listTranslateProxyDFRecid(
    recid              IN  number
   ,stamp              IN  number
   ,xdf_key            OUT number
   ,file#              OUT number
   ,status             OUT varchar2
   ,handle             OUT varchar2
   ,completion_time    OUT date
   ,checkpoint_change# OUT number
   ,checkpoint_time    OUT date);

PROCEDURE listTranslateProxyArchivedLog(
    thread#         IN number
   ,sequence#       IN number
   ,first_change#   IN number
   ,tag             IN varchar2 DEFAULT NULL
   ,handle_pattern  IN varchar2 DEFAULT NULL
   ,completedAfter  IN date DEFAULT NULL
   ,completedBefore IN date DEFAULT NULL
   ,statusMask      IN binary_integer
                       DEFAULT BSavailable+BSunavailable+BSexpired);

PROCEDURE listGetProxyArchivedLog(
    rcvRec OUT NOCOPY rcvRec_t);

-------------------------------
-- List Database Incarnation --
-------------------------------

PROCEDURE listTranslateDBIncarnation(
    db_name       IN varchar2 DEFAULT NULL,
    all_databases IN number DEFAULT 0);

FUNCTION listGetDBIncarnation(
    db_key            OUT number
   ,dbinc_key         OUT number
   ,db_name           OUT varchar2
   ,db_id             OUT number
   ,current_inc       OUT varchar2
   ,resetlogs_change# OUT number
   ,resetlogs_time    OUT date
   ,dbinc_status      OUT varchar2)
  RETURN number;

FUNCTION listGetDBIncarnation(
    db_key            OUT number
   ,dbinc_key         OUT number
   ,db_name           OUT varchar2
   ,db_id             OUT number
   ,current_inc       OUT varchar2
   ,resetlogs_change# OUT number
   ,resetlogs_time    OUT date)
  RETURN number;

-------------------------------
-- List Database Sites --
-------------------------------

PROCEDURE listTranslateDBSite(
    db_name IN varchar2 DEFAULT NULL,
    alldbs  IN binary_integer DEFAULT 1);

FUNCTION listGetDBSite(
    db_key         OUT number
   ,db_id          OUT number
   ,db_name        OUT varchar2
   ,db_role        OUT varchar2
   ,db_unique_name OUT varchar2)
  RETURN number;

--------------------------------------
-- List Rollback Segment Tablespace --
--------------------------------------

PROCEDURE listRollbackSegTableSpace;

FUNCTION listGetTableSpace(
    ts#     OUT number
   ,ts_name OUT varchar2)
  RETURN number;

------------------------
-- Incremental Backup --
------------------------

-- getIncrementalScn returns the starting scn for an incremental backup.
-- Input parameters:
--   file#
--     datafile number
--   reset_scn
--     the resetlogs SCN of the datafile
--   reset_time
--     the resetlogs time of the datafile
--   incr_level
--     level of the incremental backup
--   cumulative
--     TRUE# if the backup is cumulative
--   first
--     TRUE opens the cursor; otherwise just fetch from the already opened
--     cursor
--   sourcemask
--     the source on which this incremental is based
--   tag
--     the source tag on which this incremental is based
-- Exceptions:
--   DATAFILE_DOES_NOT_EXIST
--   INVALID_LEVEL
--   NO_PARENT_BACKUP_FOUND

FUNCTION getIncrementalScn(
    file#      IN number
   ,create_scn IN number
   ,reset_scn  IN number
   ,reset_time IN date
   ,incr_level IN number
   ,cumulative IN number
   ,sourcemask IN number DEFAULT NULL
   ,tag        IN varchar2 DEFAULT NULL
   ,pluginSCN  IN number DEFAULT 0)
  RETURN number;

-- This one is an improved version of the above. If you want to get the
-- incremental scn for all datafiles by opening the cursor only once, then
-- using this will give an enormous performance improvement.
--
-- NOTE!! NOTE!! NOTE!!
-- If you pass NULL to file# then it means all of the following:
--   o all datafiles
--   o datafiles which have the reset_scn and reset_time of the current
--     incarnation.
-- It is the caller's responsibility to fetch the incremental scn of the
-- remaining datafiles which don't have the reset_scn and reset_time of the
-- current incarnation. A commented sketch follows.
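-- A minimal, hypothetical sketch of the open-cursor-once pattern (the
-- parameter values are illustrative, and it assumes end-of-fetch is signaled
-- with no_data_found like the other conversations in this package):
--
--   DECLARE
--      rec   dbms_rcvman.rcvRec_t;
--      first boolean := TRUE;
--   BEGIN
--      LOOP
--         BEGIN
--            dbms_rcvman.getIncrementalScn(
--               first      => first
--              ,file#      => NULL    -- NULL: all current-incarnation files
--              ,create_scn => NULL
--              ,reset_scn  => NULL
--              ,reset_time => NULL
--              ,incr_level => 1
--              ,cumulative => 0
--              ,rcvRec     => rec);
--         EXCEPTION
--            WHEN no_data_found THEN EXIT;   -- all datafiles fetched
--         END;
--         first := FALSE;
--         -- ... use rec here ...
--      END LOOP;
--   END;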
PROCEDURE getIncrementalScn(
    first      IN boolean        -- open the cursor if this is TRUE
   ,file#      IN number
   ,create_scn IN number
   ,reset_scn  IN number
   ,reset_time IN date
   ,incr_level IN number
   ,cumulative IN number
   ,rcvRec     OUT NOCOPY rcvRec_t
   ,sourcemask IN number DEFAULT NULL
   ,tag        IN varchar2 DEFAULT NULL
   ,pluginSCN  IN number DEFAULT 0
   ,keep       IN boolean DEFAULT NULL);

--------------------
-- Offline Ranges --
--------------------

PROCEDURE findOfflineRangeCopy(
    offr_recid  IN number
   ,offr_ckpscn IN number
   ,cf_cretime  IN date
   ,dbinc_key   IN number);

PROCEDURE getOfflineRangeCopy(
    rcvRec OUT NOCOPY rcvRec_t);

-- Obsolete as of 8.1.6
FUNCTION getOfflineRangeCopy RETURN varchar2;

-- findOfflineRangeCopy begins the search for a controlfile copy
-- containing a specified offline range. getOfflineRangeCopy is called
-- to retrieve the controlfile names one by one. NULL is returned at
-- end of fetch.
-- Input parameters:
--   offr_recid
--     recid of the offline range
--   offr_ckpscn
--     online checkpoint SCN (end) of the offline range
--   dbinc_rlgscn
--     resetlogs SCN of the db incarnation that contains this range
-- Output parameters:
--   offr_recid
--     recid of the offline range record
--   offr_stamp
--     stamp of the offline range record
--   type
--     type of the controlfile that contains the offline range:
--     COPY or BACKUP
--   recid
--     the recid of the datafile copy record or
--     the recid of the backup set record
--   stamp
--     The timestamp associated with the recid in the controlfile.
--   fname
--     filename of the controlfile copy;
--     NULL if a backup controlfile is returned
-- returns TRUE (1) if a copy or backup was found
-- returns FALSE (0) if no copy or backup was found
-- Exceptions:
--   OFFLINE_RANGE_NOT_FOUND (ORA-20250)
--     No offline range was found for the datafile starting at the offline
--     SCN

---------------------------------------
-- Recovery Functions and Procedures --
---------------------------------------

PROCEDURE setComputeRecoveryActionMasks(
    containerMask IN number
   ,actionMask    IN number
   ,allRecords    IN number
   ,availableMask IN binary_integer
   ,fullBackups   IN number DEFAULT NULL);
-- Input parameters:
--   fullBackups
--     Stop when this many full backups have been fetched. The dependency on
--     the allRecords value is as follows:
--     1. When allRecords = FALSE# and fullBackups = NULL, we stop when
--        one full backup is fetched.
--     2. When allRecords = FALSE# and fullBackups = N, we stop when
--        N full backups are fetched.
--     3. When allRecords = TRUE# and fullBackups = NULL, we get all
--        records.
--     4. When allRecords = TRUE# and fullBackups = N, we stack N
--        full backups and all non-full backup records.

-- Obsolete as of 8.1.7
PROCEDURE setComputeRecoveryActionMasks(
    containerMask IN number
   ,actionMask    IN number
   ,allRecords    IN number);

-- Obsolete as of 8.1.6
PROCEDURE setRAflags(
    kindMask   IN number
   ,allRecords IN boolean);

FUNCTION computeRecoveryActions(
    fno          IN number              -- Datafile number.
   ,crescn       IN number              -- Datafile creation SCN.
   ,df_rlgscn    IN number default null -- Datafile resetlogs SCN. Null if
                                        -- this is a RESTORE command, else
                                        -- this is the value in the datafile
                                        -- header for the datafile we are
                                        -- RECOVERing.
   ,df_rlgtime   IN date default null   -- Datafile resetlogs time. Null if
                                        -- df_rlgscn is null, else the value
                                        -- from the datafile header.
   ,df_ckpscn    IN number default null -- Datafile checkpoint SCN. Null if
                                        -- df_rlgscn is null, else the value
                                        -- from the datafile header.
   ,offlscn      IN number default 0    -- kccfeofs (may be null).
   ,onlscn       IN number default 0    -- kccfeonc (null if offlscn is null).
   ,onltime      IN date default null   -- kccfeonc_time.
   ,cleanscn     IN number default 0    -- kccfecps if either SOR or WCC set,
                                        -- else null.
   ,clean2scn    IN number default 0    -- CF ckpt SCN if WCC set, infinity
                                        -- if the SOR bit is set, else null.
   ,clean2time   IN date default null   -- cf ckpt time if WCC, SYSDATE if
                                        -- SOR.
   ,allowfuzzy   IN boolean default FALSE -- TRUE if the file can be fuzzy at
                                        -- the until SCN/time, FALSE if not.
                                        -- The default is FALSE.
   ,partial_rcv  IN boolean default FALSE -- TRUE if partial recovery is
                                        -- possible, FALSE if not.
   ,cf_scn       IN number default NULL -- controlfile checkpoint SCN (NULL
                                        -- if none mounted).
   ,cf_cretime   IN date default NULL   -- controlfile creation time (NULL if
                                        -- none mounted).
   ,cf_offrrid   IN number default NULL -- recid of the oldest offline range
                                        -- in the controlfile (NULL if none
                                        -- mounted).
   ,allCopies    IN boolean default FALSE -- if TRUE, then stack all valid
                                        -- copies of a backup set.
   ,df_cretime   IN date default NULL   -- datafile creation time.
   ,rmanCmd      IN binary_integer default unknownCmd_t
   ,foreignDbid  IN number default 0
   ,pluggedRonly IN binary_integer default 0
   ,pluginSCN    IN number default 0
   ,pluginRlgSCN IN number default 0
   ,pluginRlgTime   IN date default NULL
   ,creation_thread IN number default NULL
   ,creation_size   IN number default NULL)
  return binary_integer;
-- Returns:
--   SUCCESS -> the file can be restored/recovered,
--   else one of RESTORABLE, AVAILABLE, UNAVAILABLE, NO_ACTION.

-- computeRecoveryActions return values --
SUCCESS     CONSTANT binary_integer := 0;
UNAVAILABLE CONSTANT binary_integer := 1;
AVAILABLE   CONSTANT binary_integer := 2;
RESTORABLE  CONSTANT binary_integer := 3;
NO_ACTION   CONSTANT binary_integer := 4;

-- SUCCESS:     A file has been found for RESTORE, or the file on disk
--              can be recovered.
-- UNAVAILABLE: If RESTORE, then no datafilecopy or level 0 backup was found.
--              If RECOVER, then some incremental backup is missing, or the
--              datafile on disk is too old to recover.
-- AVAILABLE:   If RESTORE, then some level 0 or datafilecopy exists, but
--              the required device type is not allocated.
-- RESTORABLE:  This is returned only when doing a RECOVER. It means that
--              the file on disk cannot be recovered, but there is some
--              level 0 or datafilecopy that could be restored and then
--              recovered.
-- NO_ACTION:   There are no incrementals or offline ranges to apply, but
--              the file should be recoverable with redo. No guarantee is
--              made that the logs needed are actually available.

FUNCTION getRecoveryAction(
    action OUT NOCOPY rcvRec_t)
  RETURN binary_integer;

-- Obsolete as of 8.1.6
FUNCTION getRecoveryAction(
    kind      OUT number
   ,set_stamp OUT number
   ,set_count OUT number
   ,recid     OUT number
   ,stamp     OUT number
   ,fname     OUT varchar2
   ,blocksize OUT number
   ,blocks    OUT number
   ,devtype   OUT varchar2
   ,from_scn  OUT number
   ,to_scn    OUT number
   ,to_time   OUT date
   ,rlgscn    OUT number
   ,rlgtime   OUT date
   ,cfcretime OUT date
   ,dbinc_key OUT number)
  RETURN binary_integer;

PROCEDURE printRecoveryActions;

PROCEDURE trimRecoveryActions(
    maxActions    IN number
   ,containerMask IN number
   ,actionMask    IN number);
-- trimRecoveryActions will trim the stack down to the specified number of
-- actions if it contains more. This is used by report obsolete to implement
-- the redundancy count. The reason for it is that getRecoveryAction
-- returns actions in LIFO order. This means the oldest actions, which
-- were stacked most recently, are returned first. However, report obsolete
-- wants to keep only the most recent backups when constructing the
-- "must keep" list.
-- We solve the problem by getting rid of any excess
-- actions first, so the order in which getRecoveryAction returns them
-- won't matter. Note that only actions whose type_con and type_act are
-- selected by the masks will be deleted. Other actions are left on the
-- stack.

---------------------
-- Report Obsolete --
---------------------

PROCEDURE reportTranslateDFDel;

-- pre 8.1.5 version
FUNCTION reportGetDFDel(
    file#               OUT number
   ,filetype            OUT number
   ,checkpoint_change#  OUT number
   ,checkpoint_time     OUT date
   ,resetlogs_change#   OUT number
   ,resetlogs_time      OUT date
   ,incremental_change# OUT number
   ,fuzzy_change#       OUT number
   ,recid               OUT number
   ,stamp               OUT number
   ,fname               OUT varchar2
   ,restorable          OUT number)
  RETURN number;

-- 8.1.5+ version
FUNCTION reportGetDFDel(
    file#               OUT number
   ,filetype            OUT number
   ,checkpoint_change#  OUT number
   ,checkpoint_time     OUT date
   ,resetlogs_change#   OUT number
   ,resetlogs_time      OUT date
   ,incremental_change# OUT number
   ,fuzzy_change#       OUT number
   ,recid               OUT number
   ,stamp               OUT number
   ,fname               OUT varchar2
   ,restorable          OUT number
   ,key                 OUT number
   ,completion_time     OUT date)
  RETURN number;

------------
-- TSPITR --
------------

FUNCTION getCloneName(
    fno    IN number
   ,crescn IN number
   ,pluscn IN number DEFAULT 0)
  RETURN varchar2;

---------------
-- DUPLICATE --
---------------

FUNCTION wasFileOffline(
    fno      IN number
   ,untilscn IN number)
  RETURN number;

------------------------
-- RMAN Configuration --
------------------------

procedure getConfig(
    conf# OUT number
   ,name  IN OUT varchar2
   ,value IN OUT varchar2
   ,first IN boolean);

---------------------
-- Get max(copy#) --
---------------------

FUNCTION getmaxcopyno(
    bsstamp IN number
   ,bscount IN number)
  RETURN number;

--------------------------
-- Add Corruption Table --
--------------------------

PROCEDURE bmrAddCorruptTable(
    dfnumber  OUT number
   ,blknumber OUT number
   ,range     OUT number
   ,first     IN boolean);

------------------------
-- Get Backup History --
------------------------

PROCEDURE getDfBackupHistory(
    backedUpDev  IN varchar2
   ,first        IN boolean
   ,bhistoryRec  OUT NOCOPY bhistoryRec_t
   ,recentbackup IN boolean DEFAULT FALSE  -- get no. of recent backups
   ,doingCmd     IN varchar2 DEFAULT NULL
   ,keepTag      IN varchar2 DEFAULT NULL
   ,toDest1      IN varchar2 DEFAULT NULL
   ,toDest2      IN varchar2 DEFAULT NULL
   ,toDest3      IN varchar2 DEFAULT NULL
   ,toDest4      IN varchar2 DEFAULT NULL);

PROCEDURE getAlBackupHistory(
    backedUpDev IN varchar2
   ,first       IN boolean
   ,bhistoryRec OUT NOCOPY bhistoryRec_t
   ,doingCmd    IN varchar2 DEFAULT NULL
   ,keepTag     IN varchar2 DEFAULT NULL
   ,toDest1     IN varchar2 DEFAULT NULL
   ,toDest2     IN varchar2 DEFAULT NULL
   ,toDest3     IN varchar2 DEFAULT NULL
   ,toDest4     IN varchar2 DEFAULT NULL);

PROCEDURE getBsBackupHistory(
    backedUpDev IN varchar2
   ,first       IN boolean
   ,set_stamp   IN number DEFAULT NULL
   ,set_count   IN number DEFAULT NULL
   ,bhistoryRec OUT NOCOPY bhistoryRec_t
   ,doingCmd    IN varchar2 DEFAULT NULL
   ,keepTag     IN varchar2 DEFAULT NULL
   ,toDest1     IN varchar2 DEFAULT NULL
   ,toDest2     IN varchar2 DEFAULT NULL
   ,toDest3     IN varchar2 DEFAULT NULL
   ,toDest4     IN varchar2 DEFAULT NULL);

PROCEDURE getDcBackupHistory(
    backedUpDev IN varchar2
   ,first       IN boolean
   ,bhistoryRec OUT NOCOPY bhistoryRec_t
   ,doingCmd    IN varchar2 DEFAULT NULL
   ,keepTag     IN varchar2 DEFAULT NULL
   ,toDest1     IN varchar2 DEFAULT NULL
   ,toDest2     IN varchar2 DEFAULT NULL
   ,toDest3     IN varchar2 DEFAULT NULL
   ,toDest4     IN varchar2 DEFAULT NULL);

-- Obsolete as of 9.2.0.1
PROCEDURE getBackupHistory(
    dfRec            IN  dfRec_t
   ,backedUpDev      IN  varchar2
   ,nbackupsFlag     IN  number
   ,bscompletionFlag IN  number
   ,nbackups         OUT number
   ,bscompletion     OUT date);

-- Obsolete as of 9.2.0.1
PROCEDURE getBackupHistory(
    alRec            IN  alRec_t
   ,backedUpDev      IN  varchar2
   ,nbackupsFlag     IN  number
   ,bscompletionFlag IN  number
   ,nbackups         OUT number
   ,bscompletion     OUT date);

PROCEDURE getBackupHistory(
    bpRec            IN  bpRec_t
   ,backedUpDev      IN  varchar2
   ,nbackupsFlag     IN  number
   ,bscompletionFlag IN  number
   ,nbackups         OUT number
   ,bscompletion     OUT date
   ,toDest1          IN  varchar2 DEFAULT NULL
   ,toDest2          IN  varchar2 DEFAULT NULL
   ,toDest3          IN  varchar2 DEFAULT NULL
   ,toDest4          IN  varchar2 DEFAULT NULL);

------------------
-- Version Info --
------------------

FUNCTION getPackageVersion RETURN varchar2;

------------------
-- Simple Calls --
------------------

FUNCTION isStatusMatch(status IN VARCHAR2, mask IN NUMBER) RETURN NUMBER;

FUNCTION isDeviceTypeAllocated(deviceType IN varchar2) RETURN NUMBER;

FUNCTION isBackupTypeMatch(btype IN VARCHAR2, mask IN binary_integer)
  RETURN NUMBER;

-------------------------------
-- set rcvRecBackupAge value --
-------------------------------

PROCEDURE setRcvRecBackupAge(age IN number);

-------------------------------
-- reset thisBackupAge value --
-------------------------------

PROCEDURE resetthisBackupAge;

-------------------------------------
-- List (Obsolete) Backup Function --
-------------------------------------

PROCEDURE getRetentionPolicy(
    recovery_window OUT number
   ,redundancy      OUT number);
--
-- The function getRetentionPolicy is used to get the currently configured
-- retention policy.
--

FUNCTION listBackup(
    lbRecOut      OUT NOCOPY lbRec_t
   ,firstCall     IN boolean
   ,only_obsolete IN boolean
   ,redundancy    IN number
   ,piped_call    IN boolean
   ,lbCursor      IN OUT NOCOPY lbCursor_t
   ,lbState       IN OUT NOCOPY lbState_t
   ,extRlKeepSCN  IN number DEFAULT NULL)
  RETURN boolean;
--
-- The function listBackup lists (obsolete) backups (backup sets, pieces,
-- copies, proxy copies, and archived logs).
--
-- The parameter firstCall must be TRUE on the very first call of the
-- function. The result of the function is stored in lbRecOut. However, the
-- function can return without putting data in lbRecOut, so the caller
-- should always check whether lbRecOut is NULL.
-- If the exit code of the function is FALSE, then there is no more data to
-- be returned.
--
-- piped_call   - If FALSE, you must pass dbms_rcvman.lbStatePck as lbState.
-- extRlKeepSCN - When passed a non-null value, the algorithm ensures that
--                all archivelogs at and above this scn are kept.

PROCEDURE setNeedObsoleteData(NeedObsoleteData IN boolean DEFAULT TRUE);
-- This procedure is an optimization to avoid calling computeRecoveryAction
-- when the client is not interested in the obsolete column value.

----------------------------- getCopyofDatafile -------------------------------

-- This procedure obtains the latest AVAILABLE datafilecopy for all
-- translated datafiles (and possibly the datafilecopies having a specific
-- tag). A commented usage sketch follows.
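-- A minimal, hypothetical sketch of the first/next conversation (the tag is
-- illustrative, and it assumes a NULL fno signals end-of-fetch; the package
-- comments do not spell out the termination condition):
--
--   DECLARE
--      fno     number;          crescn  number;   rlogscn number;
--      rlgtime date;            recid   binary_integer;
--      stamp   binary_integer;  name    varchar2(1024);
--      otag    varchar2(32);    status  varchar2(16);
--      nblocks binary_integer;  bsz     binary_integer;
--      ctime   date;            toscn   number;   totime  date;
--      ronly   binary_integer;  plgscn  number;
--      plgrlg  number;          plgrlgt date;
--      first   boolean := TRUE;
--   BEGIN
--      LOOP
--         dbms_rcvman.getCopyofDatafile(first, 'EXAMPLE_TAG', fno, crescn,
--            rlogscn, rlgtime, recid, stamp, name, otag, status, nblocks,
--            bsz, ctime, toscn, totime, ronly, plgscn, plgrlg, plgrlgt);
--         EXIT WHEN fno IS NULL;   -- assumed end-of-fetch signal
--         first := FALSE;
--         -- ... use the returned copy description here ...
--      END LOOP;
--   END;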
PROCEDURE getCopyofDatafile( first IN boolean -- TRUE if this is the first time called ,itag IN varchar2 -- tag that the copy should have or NULL ,fno OUT number -- datafile number ,crescn OUT number -- creation scn of the datafile ,rlogscn OUT number -- resetlogs scn of the datafile ,rlgtime OUT date -- resetlogs time of the datafile ,recid OUT binary_integer -- recid of the latest datafilecopy ,stamp OUT binary_integer -- stamp of the latest datafilecopy ,name OUT varchar2 -- name of the datafilecopy ,otag OUT varchar2 -- tag of the datafilecopy ,status OUT varchar2 -- status of the datafilecopy ,nblocks OUT binary_integer -- number of blocks of datafilecopy ,bsz OUT binary_integer -- blocksize of the datafilecopy ,ctime OUT date -- creation time of the datafilecopy ,toscn OUT number -- checkpoint scn of the datafilecopy ,totime OUT date -- checkpoint time of the datafilecopy ,pluggedRonly OUT binary_integer -- 1 for read-only. Otherwise, 0 ,pluginSCN OUT number -- plugin scn ,pluginRlgSCN OUT number -- resetlogs when datafile was plugged ,pluginRlgTime OUT date); -- resetlog time when df was plugged -- This function obtains the latest AVAILABLE datafilecopy for a given -- datafile number (and possibly the datafilecopy having a specific tag). -- It returns all the information identifying the datafilecopy. -- Obsolete as of 11.2.0.3 PROCEDURE getCopyofDatafile( dfnumber IN number -- datafile number ,itag IN varchar2 -- tag that the copy should have or NULL ,crescn IN OUT number -- creation scn of the datafile ,rlogscn IN OUT number -- resetlogs scn of the datafile ,rlgtime IN OUT date -- resetlogs time of the datafile ,recid OUT binary_integer -- recid of the latest datafilecopy ,stamp OUT binary_integer -- stamp of the latest datafilecopy ,name OUT varchar2 -- name of the datafilecopy ,otag OUT varchar2 -- tag of the datafilecopy ,status OUT varchar2 -- status of the datafilecopy ,nblocks OUT binary_integer -- number of blocks of datafilecopy ,bsz OUT binary_integer -- blocksize of the datafilecopy ,ctime OUT date -- creation time of the datafilecopy ,toscn OUT number -- checkpoint scn of the datafilecopy ,totime OUT date -- checkpoint time of the datafilecopy ,pluggedRonly OUT binary_integer -- 1 for read-only. Otherwise, 0 ,pluginSCN IN number); -- plugin scn -- This function obtains the latest AVAILABLE datafilecopy for a given -- datafile number (and possibly the datafilecopy having a specific tag). -- It returns all the information identifying the datafilecopy. 
-- Obsolete as of 11g PROCEDURE getCopyofDatafile( dfnumber IN number -- datafile number ,itag IN varchar2 -- tag that the copy should have or NULL ,crescn IN number -- creation scn of the datafile ,rlogscn IN number -- resetlogs scn of the datafile ,rlgtime IN date -- resetlogs time of the datafile ,recid OUT binary_integer -- recid of the latest datafilecopy ,stamp OUT binary_integer -- stamp of the latest datafilecopy ,name OUT varchar2 -- name of the datafilecopy ,otag OUT varchar2 -- tag of the datafilecopy ,status OUT varchar2 -- status of the datafilecopy ,nblocks OUT binary_integer -- number of blocks of the datafilecopy ,bsz OUT binary_integer -- blocksize of the datafilecopy ,ctime OUT date -- creation time of the datafilecopy ,toscn OUT number -- checkpoint scn of the datafilecopy ,totime OUT date); -- checkpoint time of the datafilecopy --------------- -- Aged File -- --------------- PROCEDURE getdropOSFiles( first IN boolean ,agedFileRec OUT NOCOPY agedFileRec_t); PROCEDURE getBackedUpFiles( first IN boolean ,agedFileRec OUT NOCOPY agedFileRec_t); -- getRedoLogDeletion Policy -- -- Returns the policyType string as 'TO NONE' or 'TO APPLIED ON STANDBY' -- PROCEDURE getRedoLogDeletionPolicy( policy OUT varchar2); -- setRedoLogDeletion Policy -- -- Initialize global variables -- a) policyType to 'TO NONE' or 'TO APPLIED ON STANDBY' -- b) policyBind to 'MANDATORY' or 'NULL' -- c) policyTarget to 'NULL', 'STANDBY' or 'REMOTE' -- -- If standbyConfig validation fails to enforce the specified policyType, -- then we fall back to the 'NONE' policy. -- -- Input parameters: -- policy - 'TO NONE' or 'TO APPLIED ON STANDBY' -- alldest - TRUE indicates the policyType is enforced on all destinations. -- Otherwise, only the MANDATORY destination is honored. -- PROCEDURE setRedoLogDeletionPolicy( policy IN varchar2 ,alldest IN number); -- For a specified policyType, validate the standby configuration. -- Basically, it checks if there is at least one destination on which the -- APPLIED policy can be enforced. Returns TRUE on success. Otherwise, -- FALSE. -- FUNCTION validateStandbyConfig( policy IN varchar2 ,alldest IN number) RETURN NUMBER; -- getSCNForAppliedPolicy -- -- Must be called after setRedoLogDeletionPolicy call. -- The function is intended to compute the SCN -- above which all archivelogs are kept for the TO APPLIED|SHIPPED policy. -- -- Output Parameters: -- minscn - minimum scn that is applied on all standby and -- guaranteed restore point -- rlgscn - resetlogs scn corresponding to minscn -- PROCEDURE getSCNForAppliedPolicy( minscn OUT number ,rlgscn OUT number); -- getAppliedAl -- -- Return archivelog records that have been applied on all destinations -- specified by the validateStandbyConfig TARGET string and redoLogDeletionPolicy. -- -- Input parameters: -- first - Pass TRUE when calling for the first time. -- agedFileRec - Archivelog record that can be deleted. -- PROCEDURE getAppliedAl( first IN boolean ,agedFileRec OUT NOCOPY agedFileRec_t); -- getRequiredSCN -- -- Calculate the lowest gap for all destinations. Calculate the highest -- scn available on all valid standby destinations. If there is no gap, return the -- high scn; otherwise return the gap scn. If streams is TRUE, consider streams -- also when computing the remote destination required SCN.
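--
-- Illustrative sketch of the deletion-policy call sequence above (an
-- assumption about how a client drives these calls, not part of the spec;
-- alldest => 1 stands in for TRUE#, and the no-data-found end-of-fetch
-- convention is assumed):
--
--   declare
--     minscn number; rlgscn number; first boolean := TRUE;
--     rec sys.dbms_rcvman.agedFileRec_t;
--   begin
--     sys.dbms_rcvman.setRedoLogDeletionPolicy('TO APPLIED ON STANDBY', 1);
--     sys.dbms_rcvman.getSCNForAppliedPolicy(minscn, rlgscn);
--     loop
--       sys.dbms_rcvman.getAppliedAl(first, rec); -- one deletable log per call
--       first := FALSE;
--     end loop;
--   exception
--     when no_data_found then null;               -- end of deletable logs
--   end;
--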
PROCEDURE getRequiredSCN( reqscn OUT number ,rlgscn OUT number ,streams IN number DEFAULT 0 ,alldest IN number DEFAULT 0); -- getAppliedSCN -- -- returns the SCN up to which logs have been applied at the physical standby database PROCEDURE getAppliedSCN( appscn OUT number ,rlgscn OUT number ,alldest IN number); -- Is this file translated by RMAN? -- Returns TRUE# if translated. Otherwise, FALSE# FUNCTION isTranslatedFno(fno IN number) RETURN NUMBER; -- Is this a match in cacheBsRec Table? -- Returns TRUE# if hit. Otherwise, FALSE# FUNCTION isBsRecCacheMatch( key IN number ,deviceType IN varchar2 ,tag IN varchar2 ,status IN varchar2) RETURN NUMBER; -- Reset reclaimable record. PROCEDURE resetReclRecid; -- Set Reclaimable record. PROCEDURE setReclRecid( rectype IN binary_integer ,recid IN number); -- Is this record reclaimable? -- Returns TRUE# if so. Otherwise, FALSE#. FUNCTION IsReclRecid( rectype IN binary_integer ,recid IN number) RETURN NUMBER; -- Return space reclaimable in bytes for files in reclaimable record table -- ceilAsm - when TRUE, ceil ASM file size in MB FUNCTION getSpaceRecl(ceilAsm IN binary_integer default 0) RETURN NUMBER; -- Given a name, return information about the restore point. PROCEDURE getRestorePoint( name IN varchar2 ,rlgscn OUT number ,rlgtime OUT date ,scn OUT number ,guaranteed OUT number); -- Prep for LIST RESTORE POINT [name/null] PROCEDURE listTranslateRestorePoint( name IN varchar2); -- Fetch for LIST RESTORE POINT [name/null] PROCEDURE listGetRestorePoint( name OUT varchar2 ,scn OUT number ,rsptime OUT date ,cretime OUT date ,rsptype OUT varchar2); -- Convert input number to displayable canonical format. The number is -- converted to the nearest M (megabytes) / G (gigabytes) / T (terabytes) -- / P (petabytes). FUNCTION Num2DisplaySize(input_size IN NUMBER) return VARCHAR2; -- Convert input seconds to displayable canonical format [HH:MM:SS] FUNCTION Sec2DisplayTime(input_secs IN NUMBER) return VARCHAR2; FUNCTION getEncryptTSCount RETURN BINARY_INTEGER; -- Hint to indicate the archivelog of interest. Later, -- isTranslatedArchivedLog can be called to verify its presence. It doesn't -- take resetlogs information, in order to keep it simple. It is the client's -- responsibility to validate further by comparing resetlogs information. PROCEDURE setArchivedLogRecord( thread# IN number ,sequence# IN number ,first IN boolean); -- To indicate that the database can handle backup transportable tablespace. -- Hence, the RMAN client should make plugged read-only files visible for -- translation. PROCEDURE setCanHandleTransportableTbs( flag IN boolean); -- Return the maximum next SCN to which the database can be recovered using -- archived logs. FUNCTION getArchivedNextSCN RETURN NUMBER; -- Check if a log is missing between fromscn and untilscn. Return TRUE -- if a log is missing. Otherwise, FALSE. FUNCTION isArchivedLogMissing(fromSCN IN NUMBER, untilSCN IN NUMBER) RETURN NUMBER; -- Return the incarnation key to which the untilscn belongs if the untilscn -- is in one of its parent incarnations. Returns 0 if the untilscn is in the current -- incarnation. FUNCTION getIncarnationKey(untilSCN IN NUMBER) RETURN NUMBER; -- Hint to indicate the dbid of interest. Later, isTranslatedDbid can -- be called to verify its presence. PROCEDURE setDbidTransClause(dbid IN number); -- Is this dbid translated by RMAN? -- Returns TRUE# if translated.
Otherwise, FALSE# FUNCTION isTranslatedDbid(dbid IN number) RETURN NUMBER; -- Obtain maximum scn from archived logs registered in the catalog -- Obsolete in 11.2 FUNCTION getMaxScn RETURN number; FUNCTION getMaxScn(logmaxnt OUT date) RETURN NUMBER; FUNCTION getActualDbinc RETURN number; -- Returns the key of the incarnation that a previous SET UNTIL, -- performed with allIncarnations = TRUE#, ended up using when -- the current incarnation was not selected. This is a recovery catalog -- only function. -- At the time of introduction of this function, it is only used by -- targetless duplicate. -- CUT_HERE <- tell sed where to chop off the rest pragma TIMESTAMP('2000-03-12:13:51:00'); END; -- dbms_rcvman or x$dbms_rcvman >>> #dbms_rcvman spec that is valid only in SYS version define dbmsrman2_sql <<< -- DE-HEAD2 <- tell sed where to chop off the rest ----------------------------------- -- Intelligent Repair Procedures -- ----------------------------------- ----------------------------- isInFailureList --------------------------------- -- -- isInFailureList is called to find out whether the parent_id or failureid -- is part of the getFailureNumList or getFailureExclude list. -- Return TRUE# if present in failure_list. Otherwise, return FALSE#. -- -- Input parameters: -- parent_id : parent id in question -- failureid : failure id in question -- for_exclude : > 0 to look up getFailureExclude. Otherwise, 0 -- to look up getFailureNumList. -- FUNCTION isInFailureList( parentId IN number ,failureId IN number ,for_exclude IN binary_integer ) RETURN NUMBER; ----------------------------- createFailureList ------------------------------- -- -- createFailureList is called to initialize a failure list in the dbms_rcvman -- package. -- -- Input parameters: -- first_call : Pass TRUE if this is the first entry in the list -- failureId : The failure id to be added to the list -- for_exclude : FALSE to initialize getFailureNumList and TRUE to -- initialize the getFailureExclude list. -- PROCEDURE createFailureList( first_call IN boolean ,failureId IN number ,for_exclude IN boolean); ------------------------------ translateFailure ------------------------------- -- -- translateFailure is called to open the cursor in order to retrieve the list -- of failures (using getFailure) from ADR. createFailureList may be -- called before this function to initialize the getFailureNumList and -- getFailureExclude lists, which are used to filter the output corresponding -- to the FAILNUM or EXCLUDE FAILNUM option in the grammar. -- -- Input Parameters: -- critical : > 0 if priority is critical or ALL. Otherwise, 0. -- high : > 0 if priority is high or ALL. Otherwise, 0. -- low : > 0 if priority is low or ALL. Otherwise, 0. -- closed : > 0 to list closed failures. Otherwise, 0. -- adviseId : If a non-null adviseid is passed, then other parameters -- are ignored, because the grammar doesn't -- allow adviseid with other options. -- PROCEDURE translateFailure( critical IN binary_integer ,high IN binary_integer ,low IN binary_integer ,closed IN binary_integer ,adviseId IN number); --------------------------------- getFailure ---------------------------------- -- -- getFailure is called to retrieve the failure list whose cursor is opened -- by the translateFailure procedure. Until it raises a no-data-found exception, -- this function is called repeatedly to retrieve all the failures. -- -- Output Parameters: -- failureRec : failure record that describes the failure.
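--
-- Illustrative sketch (an assumed client-side loop; the no-data-found
-- convention is stated above):
--
--   declare
--     frec sys.dbms_rcvman.failureRec_t;
--   begin
--     sys.dbms_rcvman.translateFailure(critical => 1, high => 1, low => 0,
--                                      closed => 0, adviseId => null);
--     loop
--       sys.dbms_rcvman.getFailure(frec);  -- one failure per call
--     end loop;
--   exception
--     when no_data_found then null;        -- end of failure list
--   end;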
-- PROCEDURE getFailure( failureRec OUT NOCOPY failureRec_t); ------------------------------ translateRepair -------------------------------- -- -- translateRepair is called to open the cursor in order to retrieve the list -- of repairs (using getRepair). -- -- Input Parameters: -- adviseId : available repairs that correspond to this advise id. -- PROCEDURE translateRepair( adviseid IN number); ----------------------------------- getRepair --------------------------------- -- -- getRepair is called to retrieve the repair list whose cursor is opened -- by the translateRepair procedure. Until it raises a no-data-found exception, -- this function is called repeatedly to retrieve all the repairs. -- -- Output Parameters: -- repairRec: repair record that describes the repair. -- PROCEDURE getRepair( repairRec OUT NOCOPY repairRec_t); -------------------------- translateRepairParms ------------------------------- -- -- translateRepairParms is called to open the cursor in order to retrieve -- the list of repair parameters (using getRepairParms). -- -- Input Parameters: -- adviseId : available repairs that correspond to this advise id. -- PROCEDURE translateRepairParms( adviseid IN number); --------------------------------- getRepairParms ----------------------------- -- -- getRepairParms is called to retrieve the repair parameters whose cursor -- is opened by the translateRepairParms procedure. Until it raises a no-data-found -- exception, this function is called repeatedly to retrieve all -- the repair parameters. -- -- Output Parameters: -- repairParmsRec: record that describes the repair parameters. -- PROCEDURE getRepairParms( repairParmsRec OUT NOCOPY repairParmsRec_t); ---------------------------- translateRepairOption -------------------------- -- -- translateRepairOption is called to open the cursor in order to retrieve -- the list of repair options (using getRepairOption). -- -- Input Parameters: -- adviseId : available repair options that correspond to this advise id. -- PROCEDURE translateRepairOption( adviseid IN number); ------------------------------- getRepairOption ------------------------------- -- -- getRepairOption is called to retrieve the repair option list whose cursor -- is opened by the translateRepairOption procedure. Until it raises a -- no-data-found exception, this function is called repeatedly to -- retrieve all the options. -- -- Output Parameters: -- repairOptionRec: repair option record that describes the option. -- PROCEDURE getRepairOption( repairOptionRec OUT NOCOPY repairOptionRec_t); ----------------------------- translateRepairStep ---------------------------- -- -- translateRepairStep is called to open the cursor in order to retrieve the -- list of repair steps (using getRepairStep). -- -- Input Parameters: -- optionidx: available repair steps that correspond to this option index. -- PROCEDURE translateRepairStep( optionidx IN number); -------------------------------- getRepairStep -------------------------------- -- -- getRepairStep is called to retrieve the repair steps whose cursor -- is opened by the translateRepairStep procedure. Until it raises a -- no-data-found exception, this function is called repeatedly to -- retrieve all the steps. -- -- Output Parameters: -- repairStepRec: repair step record that describes the step.
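--
-- Illustrative sketch of the nested option/step fetch (assumptions: the
-- option index is taken from the fetched repair option record, whose field
-- name is not shown in this spec, so the literal 1 and the adviseid 101
-- below are stand-ins):
--
--   declare
--     optRec  sys.dbms_rcvman.repairOptionRec_t;
--     stepRec sys.dbms_rcvman.repairStepRec_t;
--   begin
--     sys.dbms_rcvman.translateRepairOption(adviseid => 101);
--     loop
--       sys.dbms_rcvman.getRepairOption(optRec);
--       begin
--         sys.dbms_rcvman.translateRepairStep(optionidx => 1);
--         loop
--           sys.dbms_rcvman.getRepairStep(stepRec);
--         end loop;
--       exception
--         when no_data_found then null;  -- end of steps for this option
--       end;
--     end loop;
--   exception
--     when no_data_found then null;      -- end of repair options
--   end;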
-- PROCEDURE getRepairStep( repairStepRec OUT NOCOPY repairStepRec_t); ---------------------------- translateManualRepair ---------------------------- -- -- translateManualRepair is called to open the cursor in order to retrieve -- the list of manual repairs (using getManualRepair). -- -- Input Parameters: -- adviseId : available manual repairs that correspond to this advise id. -- PROCEDURE translateManualRepair( adviseId IN number); -------------------------------- getManualRepair ------------------------------ -- -- getManualRepair is called to retrieve the manual repair message whose cursor -- is opened by the translateManualRepair procedure. Until it raises a -- no-data-found exception, this function is called repeatedly to -- retrieve all the manual messages. -- -- Return: -- Return the manual repair message. -- FUNCTION getManualRepair( mandatory OUT varchar2) RETURN varchar2; ----------------------------- getRepairScriptName ----------------------------- -- -- getRepairScriptName is called to retrieve the repair script filename -- from v$ir_repair and its description. -- -- Input Parameters: -- repairId : retrieve repair script filename for this repair id -- Return: -- Return the repair script location and description. -- FUNCTION getRepairScriptName( repairId IN number, description OUT varchar2) RETURN varchar2; pragma TIMESTAMP('2000-03-12:13:51:00'); END; -- dbms_rcvman or x$dbms_rcvman -- CUT_HERE2 <- tell sed where to chop off the rest >>> define dbmsrman3_sql <<< -- DE-HEAD3 <- tell sed where to chop off the rest -- Move the role/grant here from catalog.sql due to restructuring. -- Recovery Catalog owner role -- Do not drop the role recovery_catalog_owner. -- Dropping this role will revoke it from all rman users. -- If this role exists, ORA-1921 is expected. declare role_exists exception; pragma exception_init(role_exists, -1921); begin execute immediate 'create role recovery_catalog_owner'; exception when role_exists then null; end; / grant create session,alter session,create synonym,create view, create database link,create table,create cluster,create sequence, create trigger,create procedure, create type to recovery_catalog_owner; drop public synonym v$backup_files; drop view v_$backup_files; drop function v_listBackupPipe; drop type v_lbRecSetImpl_t; drop type v_lbRecSet_t; drop type v_lbRec_t; -- The obsolete column is at position 20 in this object, and the object -- implementation performs an optimization based on whether the user selected -- the obsolete column (see the Fetch function). If you add an element to -- this object before the 20th position, you must fix the Fetch function as well.
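-- For instance (illustrative only), a query such as
--
--   SELECT fname, obsolete FROM v$backup_files;
--
-- references attribute 20 (obsolete), so ODCITablePrepare below sets
-- needobsolete to 1 and the obsolete status is computed; a query that never
-- touches the obsolete column skips that work via setNeedObsoleteData(false).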
create type v_lbRec_t as object ( list_order1 NUMBER, list_order2 NUMBER, pkey NUMBER, backup_type VARCHAR2(32), file_type VARCHAR2(32), keep VARCHAR2(3), keep_until DATE, keep_options VARCHAR2(13), status VARCHAR2(16), fname VARCHAR2(1024), tag VARCHAR2(32), media VARCHAR2(80), recid NUMBER, stamp NUMBER, device_type VARCHAR2(255), block_size NUMBER, completion_time DATE, is_rdf VARCHAR2(3), compressed VARCHAR2(3), obsolete VARCHAR2(3), bytes NUMBER, bs_key NUMBER, bs_count NUMBER, bs_stamp NUMBER, bs_type VARCHAR2(32), bs_incr_type VARCHAR2(32), bs_pieces NUMBER, bs_copies NUMBER, bs_completion_time DATE, bs_status VARCHAR2(16), bs_bytes NUMBER, bs_compressed VARCHAR2(3), bs_tag VARCHAR2(1024), bs_device_type VARCHAR2(255), bp_piece# NUMBER, bp_copy# NUMBER, df_file# NUMBER, df_tablespace VARCHAR2(30), df_resetlogs_change# NUMBER, df_creation_change# NUMBER, df_checkpoint_change# NUMBER, df_ckp_mod_time DATE, df_incremental_change# NUMBER, rl_thread# NUMBER, rl_sequence# NUMBER, rl_resetlogs_change# NUMBER, rl_first_change# NUMBER, rl_first_time DATE, rl_next_change# NUMBER, rl_next_time DATE ); / create type v_lbRecSet_t as table of v_lbRec_t; / create type v_lbRecSetImpl_t as object ( curval number, -- current rownum done number, -- done with the query needobsolete number, -- user requested obsolete column static function ODCITablePrepare(sctx OUT v_lbRecSetImpl_t, ti IN SYS.ODCITabFuncInfo) return number, static function ODCITableStart(sctx IN OUT v_lbRecSetImpl_t) return number, member function ODCITableFetch(self IN OUT v_lbRecSetImpl_t, nrows IN number, objSet OUT v_lbRecSet_t) return number, member function ODCITableClose(self IN v_lbRecSetImpl_t) return number ); / create or replace type body v_lbRecSetImpl_t is static function ODCITablePrepare(sctx OUT v_lbRecSetImpl_t, ti IN SYS.ODCITabFuncInfo) return number is begin -- create instance of object, initialise curval, done and needobsolete sctx:=v_lbRecSetImpl_t(0, 0, 0); -- check if the user is interested in the obsolete column. If this column location -- is changed in the object definition, this should be fixed. for i in ti.Attrs.first .. ti.Attrs.last loop if (ti.Attrs(i) = 20) then sctx.needobsolete := 1; exit; end if; end loop; return SYS.ODCIConst.Success; end ODCITablePrepare; static function ODCITableStart(sctx IN OUT v_lbRecSetImpl_t) return number is begin return SYS.ODCIConst.Success; end ODCITableStart; -- Fetch function is not called more than once. It returns all rows when -- called the first time for each query, because we cannot have package composite -- types within an object definition. For the same reason, the nrows parameter -- is ignored. member function ODCITableFetch(self IN OUT v_lbRecSetImpl_t, nrows IN number, objSet OUT v_lbRecSet_t) return number is n number := 0; firstCall boolean := TRUE; ret boolean := TRUE; redundancy number; recovery_window number; untilTime date; lbRec sys.dbms_rcvman.lbrec_t; lbCursor sys.dbms_rcvman.lbCursor_t; lbState sys.dbms_rcvman.lbState_t; begin objSet:=v_lbRecSet_t(); -- reset package state sys.dbms_rcvman.resetAll; -- Set database so that the user does not need to care sys.dbms_rcvman.setDatabase(NULL, NULL, NULL, NULL); redundancy := 1; recovery_window := 0; -- We need to get the retention policy, and to set untilTime if the -- retention policy is recovery_window. -- Get retention policy (recovery window and redundancy). sys.dbms_rcvman.getRetentionPolicy(recovery_window, redundancy); -- Always work with all incarnations.
sys.dbms_rcvman.setAllIncarnations(TRUE); -- Set untilTime and untilSCN for recovery window (if any). if (recovery_window > 0) then select (sysdate-recovery_window) into untilTime from dual; sys.dbms_rcvman.setUntilTime(untilTime); end if; sys.dbms_rcvman.setDeviceTypeAny; if (recovery_window = 0 and redundancy = 0) then -- don't need obsolete data if the policy is NONE sys.dbms_rcvman.setNeedObsoleteData(false); else if self.needobsolete = 1 then sys.dbms_rcvman.setNeedObsoleteData(true); else sys.dbms_rcvman.setNeedObsoleteData(false); end if; end if; while ret and self.done = 0 loop ret := sys.dbms_rcvman.listBackup(lbRec, firstCall, FALSE, redundancy, TRUE, lbCursor, lbState, null); if (lbRec.pkey is not null) then objSet.extend; n := n + 1; objSet(n):= v_lbRec_t( to_number(null), -- list_order1 to_number(null), -- list_order2 to_number(null), -- pkey to_char(null), -- backup_type to_char(null), -- file_type to_char(null), -- keep to_date(null), -- keep_until to_char(null), -- keep_options to_char(null), -- status to_char(null), -- fname to_char(null), -- tag to_char(null), -- media to_number(null), -- recid to_number(null), -- stamp to_char(null), -- device_type to_number(null), -- block_size to_date(null), -- completion_time to_char(null), -- is_rdf to_char(null), -- compressed to_char(null), -- obsolete to_number(null), -- bytes to_number(null), -- bs_key to_number(null), -- bs_count to_number(null), -- bs_stamp to_char(null), -- bs_type to_char(null), -- bs_incr_type to_number(null), -- bs_pieces to_number(null), -- bs_copies to_date(null), -- bs_completion_time to_char(null), -- bs_status to_number(null), -- bs_bytes to_char(null), -- bs_compressed to_char(null), -- bs_tag to_char(null), -- bs_device_type to_number(null), -- bp_piece# to_number(null), -- bp_copy# to_number(null), -- df_file# to_char(null), -- df_tablespace to_number(null), -- df_resetlogs_change# to_number(null), -- df_creation_change# to_number(null), -- df_checkpoint_change# to_date(null), -- df_ckp_mod_time to_number(null), -- df_incremental_change# to_number(null), -- rl_thread# to_number(null), -- rl_sequence# to_number(null), -- rl_resetlogs_change# to_number(null), -- rl_first_change# to_date(null), -- rl_first_time to_number(null), -- rl_next_change# to_date(null)); -- rl_next_time objSet(n).list_order1 := lbRec.list_order1; objSet(n).list_order2 := lbRec.list_order2; objSet(n).pkey := lbRec.pkey; objSet(n).backup_type := lbRec.backup_type; objSet(n).file_type := lbRec.file_type; objSet(n).keep := lbRec.keep; objSet(n).keep_until := lbRec.keep_until; objSet(n).keep_options := lbRec.keep_options; objSet(n).status := lbRec.status; objSet(n).fname := lbRec.fname; objSet(n).tag := lbRec.tag; objSet(n).media := lbRec.media; objSet(n).recid := lbRec.recid; objSet(n).stamp := lbRec.stamp; objSet(n).device_type := lbRec.device_type; objSet(n).block_size := lbRec.block_size; objSet(n).completion_time := lbRec.completion_time; objSet(n).is_rdf := lbRec.is_rdf; objSet(n).compressed := lbRec.compressed; objSet(n).obsolete := lbRec.obsolete; objSet(n).bytes := lbRec.bytes; objSet(n).bs_key := lbRec.bs_key; objSet(n).bs_count := lbRec.bs_count; objSet(n).bs_stamp := lbRec.bs_stamp; objSet(n).bs_type := lbRec.bs_type; objSet(n).bs_incr_type := lbRec.bs_incr_type; objSet(n).bs_pieces := lbRec.bs_pieces; objSet(n).bs_copies := lbRec.bs_copies; objSet(n).bs_completion_time := lbRec.bs_completion_time; objSet(n).bs_status := lbRec.bs_status; objSet(n).bs_bytes := lbRec.bs_bytes; objSet(n).bs_compressed :=
lbRec.bs_compressed; objSet(n).bs_tag := lbRec.bs_tag; objSet(n).bs_device_type := lbRec.bs_device_type; objSet(n).bp_piece# := lbRec.bp_piece#; objSet(n).bp_copy# := lbRec.bp_copy#; objSet(n).df_file# := lbRec.df_file#; objSet(n).df_tablespace := lbRec.df_tablespace; objSet(n).df_resetlogs_change# := lbRec.df_resetlogs_change#; objSet(n).df_creation_change# := lbRec.df_creation_change#; objSet(n).df_checkpoint_change# := lbRec.df_checkpoint_change#; objSet(n).df_ckp_mod_time := lbRec.df_ckp_mod_time; objSet(n).df_incremental_change# := lbRec.df_incremental_change#; objSet(n).rl_thread# := lbRec.rl_thread#; objSet(n).rl_sequence# := lbRec.rl_sequence#; objSet(n).rl_resetlogs_change# := lbRec.rl_resetlogs_change#; objSet(n).rl_first_change# := lbRec.rl_first_change#; objSet(n).rl_first_time := lbRec.rl_first_time; objSet(n).rl_next_change# := lbRec.rl_next_change#; objSet(n).rl_next_time := lbRec.rl_next_time; end if; firstCall := false; self.curval:=self.curval+1; if not ret then self.done := 1; end if; end loop; return SYS.ODCIConst.Success; end ODCITableFetch; member function ODCITableClose(self IN v_lbRecSetImpl_t) return number is begin return SYS.ODCIConst.Success; end ODCITableClose; end; / CREATE OR REPLACE FUNCTION v_listBackupPipe RETURN v_lbRecSet_t PIPELINED using v_lbRecSetImpl_t; / -- -- The following views are connected with the dbms_rcvman packages and -- are only part of the admin/dbmsrman.sql file, which is started from -- catproc.sql. Note that these views are not fixed views. -- NOTE: The following elements from lbRec_t should not be in the view: -- - is_rdf -- - list_order -- - df_incremental_change# -- create or replace view v_$backup_files as select pkey, backup_type, file_type, keep, keep_until, keep_options, status, fname, tag, media, recid, stamp, device_type, block_size, completion_time, compressed, obsolete, bytes, bs_key, bs_count, bs_stamp, bs_type, bs_incr_type, bs_pieces, bs_copies, bs_completion_time, bs_status, bs_bytes, bs_compressed, bs_tag, bs_device_type, bp_piece#, bp_copy#, df_file#, df_tablespace, df_resetlogs_change#, df_creation_change#, df_checkpoint_change#, df_ckp_mod_time, rl_thread#, rl_sequence#, rl_resetlogs_change#, rl_first_change#, rl_first_time, rl_next_change#, rl_next_time from table(v_listBackupPipe); create or replace public synonym v$backup_files for v_$backup_files; / grant execute on sys.dbms_rcvman to select_catalog_role; grant select on v_$backup_files to select_catalog_role; create or replace view v_$rman_backup_subjob_details as select * from v$rman_backup_subjob_details; create or replace public synonym v$rman_backup_subjob_details for v_$rman_backup_subjob_details; grant select on v_$rman_backup_subjob_details to select_catalog_role; create or replace view v_$rman_backup_job_details as select * from v$rman_backup_job_details; create or replace public synonym v$rman_backup_job_details for v_$rman_backup_job_details; grant select on v_$rman_backup_job_details to select_catalog_role; create or replace view v_$backup_set_details as select * from v$backup_set_details; create or replace public synonym v$backup_set_details for v_$backup_set_details; grant select on v_$backup_set_details to select_catalog_role; create or replace view v_$backup_piece_details as select * from v$backup_piece_details; create or replace public synonym v$backup_piece_details for v_$backup_piece_details; grant select on v_$backup_piece_details to select_catalog_role; create or replace view v_$backup_copy_details as select * from
v$backup_copy_details; create or replace public synonym v$backup_copy_details for v_$backup_copy_details; grant select on v_$backup_copy_details to select_catalog_role; create or replace view v_$proxy_copy_details as select * from v$proxy_copy_details; create or replace public synonym v$proxy_copy_details for v_$proxy_copy_details; grant select on v_$proxy_copy_details to select_catalog_role; create or replace view v_$proxy_archivelog_details as select * from v$proxy_archivelog_details; create or replace public synonym v$proxy_archivelog_details for v_$proxy_archivelog_details; grant select on v_$proxy_archivelog_details to select_catalog_role; create or replace view v_$backup_datafile_details as select * from v$backup_datafile_details; create or replace public synonym v$backup_datafile_details for v_$backup_datafile_details; grant select on v_$backup_datafile_details to select_catalog_role; create or replace view v_$backup_controlfile_details as select * from v$backup_controlfile_details; create or replace public synonym v$backup_controlfile_details for v_$backup_controlfile_details; grant select on v_$backup_controlfile_details to select_catalog_role; create or replace view v_$backup_archivelog_details as select * from v$backup_archivelog_details; create or replace public synonym v$backup_archivelog_details for v_$backup_archivelog_details; grant select on v_$backup_archivelog_details to select_catalog_role; create or replace view v_$backup_spfile_details as select * from v$backup_spfile_details; create or replace public synonym v$backup_spfile_details for v_$backup_spfile_details; grant select on v_$backup_spfile_details to select_catalog_role; create or replace view v_$backup_set_summary as select * from v$backup_set_summary; create or replace public synonym v$backup_set_summary for v_$backup_set_summary; grant select on v_$backup_set_summary to select_catalog_role; create or replace view v_$backup_datafile_summary as select * from v$backup_datafile_summary; create or replace public synonym v$backup_datafile_summary for v_$backup_datafile_summary; grant select on v_$backup_datafile_summary to select_catalog_role; create or replace view v_$backup_controlfile_summary as select * from v$backup_controlfile_summary; create or replace public synonym v$backup_controlfile_summary for v_$backup_controlfile_summary; grant select on v_$backup_controlfile_summary to select_catalog_role; create or replace view v_$backup_archivelog_summary as select * from v$backup_archivelog_summary; create or replace public synonym v$backup_archivelog_summary for v_$backup_archivelog_summary; grant select on v_$backup_archivelog_summary to select_catalog_role; create or replace view v_$backup_spfile_summary as select * from v$backup_spfile_summary; create or replace public synonym v$backup_spfile_summary for v_$backup_spfile_summary; grant select on v_$backup_spfile_summary to select_catalog_role; create or replace view v_$backup_copy_summary as select * from v$backup_copy_summary; create or replace public synonym v$backup_copy_summary for v_$backup_copy_summary; grant select on v_$backup_copy_summary to select_catalog_role; create or replace view v_$proxy_copy_summary as select * from v$proxy_copy_summary; create or replace public synonym v$proxy_copy_summary for v_$proxy_copy_summary; grant select on v_$proxy_copy_summary to select_catalog_role; create or replace view v_$proxy_archivelog_summary as select * from v$proxy_archivelog_summary; create or replace public synonym v$proxy_archivelog_summary for 
v_$proxy_archivelog_summary; grant select on v_$proxy_archivelog_summary to select_catalog_role; create or replace view v_$unusable_backupfile_details as select * from v$unusable_backupfile_details; create or replace public synonym v$unusable_backupfile_details for v_$unusable_backupfile_details; grant select on v_$unusable_backupfile_details to select_catalog_role; create or replace view v_$rman_backup_type as select * from v$rman_backup_type; create or replace public synonym v$rman_backup_type for v_$rman_backup_type; grant select on v_$rman_backup_type to select_catalog_role; create or replace view v_$rman_encryption_algorithms as select * from v$rman_encryption_algorithms; create or replace public synonym v$rman_encryption_algorithms for v_$rman_encryption_algorithms; grant select on v_$rman_encryption_algorithms to select_catalog_role; -- CUT_HERE3 <- tell sed where to chop off the rest >>> # recovery catalog version define rcver <<< CREATE TABLE rcver ( version varchar2(12) NOT NULL, constraint rcver_version_unique unique(version) ) &tablespace& >>> # This libmem sets the rcver table to contain exactly one row, which # describes the version of the recovery catalog. We delete existing rows # first because we used to allow multiple rows in rcver. # This table does not describe the versions of # rman which can run against this recovery catalog. That is done by the # 'getpackageversion' procedure in each catalog package. # This just describes what version of the recovery catalog tables we have. define rcver_update <<< begin delete from rcver; insert into rcver values('11.02.00.03'); commit; end; >>> define prvtrvct_plb <<< -- Copyright (c) 2006, 2011, Oracle and/or its affiliates. -- All rights reserved. -- -- CREATE OR replace PACKAGE BODY dbms_rcvcat IS -- prvtrvct.sql -- -- -- NAME -- prvtrvct.sql -- -- DESCRIPTION -- -- NOTES -- -- MODIFIED (MM/DD/YY) -- pkapil 07/25/11 - Backport pkapil_bug-12597985 from main -- pkapil 06/24/11 - Backport pkapil_bug-12650110 from main -- swerthei 06/13/11 - Backport swerthei_bug-12400752 from main -- fsanchez 05/16/11 - Backport fsanchez_bug-9909828 from -- st_rdbms_11.2.0.1.0 -- banand 03/27/11 - Backport banand_bug-11770005 from main -- debjroy 03/02/11 - Backport debjroy_bug-11809118 from main -- banand 02/14/11 - Backport banand_bug-9971106 from main -- banand 01/26/11 - Backport banand_bug-10292173 from main -- banand 01/26/11 - Backport banand_bug-10143694 from main -- banand 12/28/10 - Backport banand_bug-9799518 from main -- fsanchez 11/24/10 - Backport fsanchez_b9764019_x64 from main -- banand 11/23/10 - Backport banand_bug-9289630 from main -- molagapp 11/29/10 - bump up version to 11.2.0.3 -- molagapp 11/10/10 - Backport 10237171: basebug 10190398 -- jkrismer 06/08/10 - bug-9166466 ora-1422 during DeletedObject resync -- banand 05/21/10 - bug 9576536 -- banand 01/24/10 - bug 9067641 -- banand 09/30/09 - bug 8947742 -- molagapp 07/15/09 - bump up version to 11.2.0.2 -- molagapp 04/29/09 - bump up version to 11.2.0.1 -- molagapp 03/27/09 - bug 5739423 -- molagapp 03/17/09 - bug 8324589 -- banand 03/13/09 - bug 8340871:changeArchivelog to deal with recid/stamp -- molagapp 07/10/08 - bug 7215002 -- banand 07/08/08 - bug-7117200 -- banand 06/26/08 - bug 7173341: fix cleanupRSR performance -- banand 05/30/08 - bug 7138218 -- banand 04/22/08 - bug 6993175 done txn bug-6965089 -- fsanchez 06/05/07 - tspitr datapump -- jkrismer 02/29/08 - bug-6055481 bsStatusRecalc only from 9.2 client -- molagapp 02/13/08 - bug-6774767 -- banand 01/26/08 - bug 
6750214:df.rfile# and df.create_time can be null -- banand 12/05/07 - bug-6653570: fix tempfile resync in DG env. -- jkrismer 11/21/07 - bug 5906892 fix ORA-12899 when differnt charset -- jciminsk 10/22/07 - Upgrade support for 11.2 -- jciminsk 10/08/07 - version to 11.2.0.0.0 -- molagapp 10/02/07 - bug 6451722 -- banand 08/15/07 - fix update brl SQL and cleanupROUT procedure -- jciminsk 08/03/07 - version to 11.1.0.7.0 -- molagapp 06/05/07 - bump up version to 11.1.0.6 -- jkrismer 06/12/07 - Bug 5932029 reset db high_ic_recid in beginCkpt -- raguzman 05/11/07 - ReOrg restore point resync, add proxy copy affect -- banand 06/05/07 - bug-6034995 -- molagapp 05/15/07 - bug-5939669 -- banand 05/10/07 - track remote cf not updated during resync -- banand 05/01/07 - bug 6011303 -- banand 05/01/07 - backupset reset db_unique_name allowed -- banand 04/23/07 - bug 5971763 -- banand 04/08/07 - package support for file sharing attributes -- molagapp 04/18/07 - bump up version to 11.1.0.5 -- banand 03/26/07 - bug 5885624 - validate db_id for resetDatabase -- jkrismer 03/27/07 - 5932181 and 5934290 fix resync for temp -- molagapp 04/02/07 - bug 5899994 -- raguzman 02/19/07 - checkDeletedObject should always update site_dfatt -- banand 03/01/07 - update site_key of duplicate record during resync -- banand 02/23/07 - water mark usage at stby for non-null db_unique_name -- molagapp 02/14/07 - bump up version to 11.1.0.4 -- banand 02/21/07 - update comments and minor fixes -- banand 01/29/07 - resync at standby to use last full resync ckpt scn -- banand 01/26/07 - add encrypted, backed_by_osb -- banand 08/31/06 - bug-5647645 -- banand 11/22/06 - resync conf from catalog cf first time, if there are -- configurations for that site. -- banand 11/08/06 - bug 5219484 -- molagapp 11/01/06 - bump up version to 11.1.0.3 -- banand 09/10/06 - bug 5441981 -- molagapp 10/04/06 - bump up version to 11.1.0.2 -- molagapp 09/15/06 - bump up version to 11.1.0.1 -- raguzman 08/07/06 - Resync time for guaranteed restore points -- swerthei 08/02/06 - bug 5364391, remove date conversion in bcf insert -- molagapp 07/13/06 - refix bug-2107554 -- amjoshi 07/05/06 - lrg-2384619. -- raguzman 06/10/06 - Resync normal restore points -- cpedrega 06/12/06 - dbuname set to 30 (=KRMK_KDBUNMAXLEN); -- fixed SELECT SUBSTR for db_uname -- banand 06/10/06 - 17844_phase_3: unregistersite remove conf rows also -- amjoshi 06/08/06 - Update node table on full resync. -- molagapp 05/29/06 - improve block corruption project -- swerthei 10/12/05 - virtual private catalog -- swerthei 01/07/06 - multi-section backups -- banand 05/09/06 - 17844_phase_2: spfile/change/resync changes -- molagapp 01/23/06 - backup transportable tablespace -- molagapp 03/20/06 - bug-5106952 -- molagapp 12/21/05 - merge catalog project -- banand 12/27/05 - 17844_phase_1: track site specific info -- fsanchez 01/05/06 - keep track of reason and actions of full resync -- amjoshi 03/17/06 - LRG 2106205: fix resync. code for datafiles. 
-- banand 12/27/05 - schema changes to track node specific info -- molagapp 01/30/06 - bug-4941096 -- molagapp 01/16/06 - bump up version to 11.1.0.0 -- molagapp 12/12/05 - bug 4754328 -- banand 12/07/05 - bug 4755799 -- banand 09/28/05 - bug 4637849 -- molagapp 10/03/05 - update versionList -- molagapp 09/17/05 - fix changeBackuppiece -- molagapp 08/05/05 - bug 4531791 -- banand 05/19/05 - schema change for encrypted backup configurations -- fsanchez 11/02/04 - backup optimization -- banand 03/22/05 - bug-3877184 -- molagapp 03/18/05 - add getLogHistoryLowSCN -- molagapp 03/02/05 - bug-4146404 -- molagapp 02/15/05 - rewrite sql query for performance -- molagapp 02/14/05 - remove unnecessary to_date -- molagapp 01/26/05 - bug-3959063: add isDuplicateRecord -- banand 01/05/05 - bug-3966722 -- fsanchez 08/25/04 - bug 2794801 - get current dbinc -- banand 09/17/04 - bug 3888851 -- molagapp 08/27/04 - remove guaranteed_flashback_scn -- molagapp 08/18/04 - change default block_size as null -- banand 04/20/04 - enhance job views -- molagapp 05/18/04 - add guaranteed_flashback_scn -- molagapp 05/20/04 - add new_incarnation to resetDatabase -- molagapp 05/01/04 - tempfile re-creation project -- molagapp 03/23/04 - bug-3527769 -- jeffyu 02/06/04 - bug 3234433 -- fsanchez 02/13/04 - Rewrite checkOfflineRange procedure -- molagapp 02/11/04 - bug 3310413 -- banand 10/10/03 - bug 3134939 -- molagapp 10/16/03 - remove is_recovery_dest_file from checkDataFile -- swerthei 09/12/03 - add bdf.blocks_read -- fsanchez 09/29/03 - lrg_1578516 -- molagapp 09/10/03 - update bs status while removing duplicate handles -- molagapp 09/10/03 - fix update to rlh table with clear status -- sdizdar 09/09/03 - bug-3115984: add rename "delete object" -- molagapp 09/05/03 - lrg 1564671: close scrlQ cursor at end of fetch -- jeffyu 08/26/03 - modifying updateOldestFlashbackSCN - lrg 1562424 -- sdizdar 08/21/03 - bug-3005920 -- sdizdar 08/19/03 - add more debugs in checkRmanStatus -- jeffyu 07/21/03 - bug 2976535 -- molagapp 06/09/03 - fix force_resync2cf update -- molagapp 05/06/03 - allow uncatalog of bp when db is not mounted -- banand 05/14/03 - multi-node RMAN configuration support -- molagapp 05/30/03 - fix keep_options default values -- molagapp 03/23/03 - use dbms_output buffer_size as null -- fsanchez 01/20/03 - enhanced_scripts -- sdizdar 01/30/03 - prj 2090 (compressed backup): -- - add resync of compressed flag -- fsanchez 03/26/03 - bug-2845436 -- swerthei 02/21/03 - bug 2803823; make cleanupCKP query more efficient -- sdizdar 02/25/03 - remove rsr from ckptNeeded() -- banand 02/13/03 - bug 2802688: fix updating of duplicate names for al -- molagapp 02/04/03 - fix 10i package compatibility -- mjaeger 02/02/03 - bug 2719863: changeDatafileCopy: add cond on db key -- mjaeger 12/10/02 - bug 2554861: lockForCkpt: avoid deadlock on CKP -- sdizdar 09/03/02 - add resync of RC_RMAN_STATUS -- molagapp 08/20/02 - recovery area project -- banand 08/26/02 - dbinc_status values as in varchar2 -- banand 08/09/02 - Recovery thru resetlogs proj: -- - get resetlogs stamps for offr and orl -- - add recomputeDbincStatus -- nsadaran 09/30/02 - fixing comments -- jeffyu 09/19/02 - Added rename tablespace support in resync -- mdilman 09/18/02 - support for bigfile column in rc_tablespace -- molagapp 05/24/02 - delete cdf entry with status 'D' -- sdizdar 05/22/02 - modify unregisterDatabase -- molagapp 05/13/02 - fix update no_of_pieces in checkBackupPiece -- molagapp 04/17/02 - proxy archived log -- molagapp 01/06/02 - catalog 
backuppiece support -- banand 02/01/02 - fix 2210440 -- molagapp 01/31/01 - change default value for cftype in beginCkpt -- molagapp 11/29/01 - update package version 9.2.0 -- molagapp 11/13/01 - bug 2107554 -- fsanchez 11/10/01 - bug-2071872 -- molagapp 10/29/01 - fix standby/primary switch full resync -- molagapp 05/27/01 - bug 1530744 -- molagapp 10/08/01 - fix default value for scanned -- sdizdar 09/08/01 - SPFILE backup: add functions for resync -- swerthei 08/03/01 - close cursors in cancelCkpt -- banand 07/24/01 - fix 1856783 -- banand 07/02/01 - fix 758489 -- fsanchez 05/23/01 - dbnewid -- sdizdar 06/26/01 - bug-1852923: fix beginConfigResync() -- swerthei 05/08/01 - code cleanup -- swerthei 04/04/01 - add v$datafile_copy.scanned -- fsanchez 04/11/01 - remove_get_put -- swerthei 04/03/01 - add new setDatabase calls for debugging -- swerthei 03/05/01 - ckp table cleanup -- fsanchez 02/15/01 - bug-1538834 -- fsanchez 01/25/01 - bug-1586048 -- molagapp 11/30/00 - bug 1518515 -- sdizdar 11/12/00 - bug-1496982: 8.2 -> 9.0 -- molagapp 10/26/00 - bug-1332121 -- molagapp 10/24/00 - bug-1478785 -- molagapp 10/16/00 - bug-1467871 -- sdizdar 09/21/00 - fix 8.x compatibility -- - modify ckptNeeded -- fsanchez 05/30/00 - cfile-autobackup -- sdizdar 09/11/00 - tablespace resync fix: -- - fix ckptNeeded (add recid of tablespace records) -- - improved beginTableSpaceResyn -- molagapp 08/28/00 - fix 8.2 upgrade -- dbeusee 06/28/00 - rman82_maint_syntax_unification -- dbeusee 07/20/00 - rman82_debug_enhancements -- fsanchez 07/31/00 - backup_flat_files -- sdizdar 06/28/00 - Configure auxfilename and exclude tablespace: -- - setDatabase calls dbms_rcvman.setDatabase -- - add getCloneName, -- - updated checkDatafile and checkTablespace -- fsanchez 05/30/00 - cfile-autobackup -- sdizdar 05/12/00 - RMAN retention policy (keep): -- - keep backup support -- banand 05/19/00 - use value in findConfig_c cursor -- swerthei 06/06/00 - add archived log logminer dictionary columns -- dbeusee 04/13/00 - rman82_cf_status_unification -- molagapp 05/18/00 - rfile# and recovered changes -- fsanchez 03/22/00 - instantiate_standby -- sdizdar 04/17/00 - RMAN configuration: -- - add setConfig, resetConfig, deleteConfig, getConfig -- swerthei 04/04/00 - allow orphaned blocks in bcb -- molagapp 02/25/00 - bug 1186598: remove compatible, undo deleteAL change -- sdizdar 02/18/00 - bug-977412: added commit to -- deleteScript, createScript and replaceScript -- swerthei 09/14/99 - add dbms_rcvcat.reNormalize -- gpongrac 08/16/99 - add 8.1.6 to versionlist -- gpongrac 08/04/99 - really delete records now -- gpongrac 07/14/99 - add stop_time to df table -- gpongrac 07/06/99 - change rcver to 8.1.6 -- gpongrac 12/18/98 - clean up checkBackupPiece -- dbeusee 09/13/98 - bug-728666 -- rlu 05/03/99 - 621515_restore_main -- gpongrac 04/08/99 - change REM to -- -- rlu 11/25/98 - bug_621515 -- swerthei 10/22/98 - change proxy messages -- fsanchez 10/07/98 - bug-607271 -- swerthei 06/19/98 - prepare for wrapping to recover.bsq -- swerthei 06/17/98 - make compatible with 8.0 -- swerthei 06/01/98 - add media_pool -- swerthei 06/01/98 - add changeProxyCopy -- swerthei 05/18/98 - resync proxy copy records -- dbeusee 04/20/98 - rpt_redundancy_enh -- gpongrac 05/06/98 - add bsStatusRecalc -- dbeusee 04/06/98 - xcheck enh -- fsanchez 03/29/98 - Duplexed backup sets -- gpongrac 01/27/98 - change getCatalogVersion -- gpongrac 01/16/98 - bug 612344: deal with null fname in checkDatafile -- fsanchez 01/04/98 - Allow setDatabase to receive dbid 
without dbname -- gpongrac 09/02/97 - add setDatafile Size -- tpystyne 09/12/97 - bug 480172, fix name translation -- gpongrac 08/12/97 - deal with clone_name becoming the real filename -- gpongrac 07/01/97 - fix typo -- gpongrac 06/30/97 - keep offline clean and read-only scn in df table -- gpongrac 06/30/97 - record current offline range in kccor -- gpongrac 06/26/97 - uppercase all keywords and reformat -- gpongrac 04/08/97 - deal with 0 creation scn in checkofflinerange -- gpongrac 04/03/97 - consider offline ranges in ckptneeded -- gpongrac 03/31/97 - change to use version_time instead of cf_create_t -- gpongrac 03/31/97 - add cf_create_time to offr -- tpystyne 03/20/97 - update catalog version to 8.00.03 -- tpystyne 02/27/97 - add ckptneeded -- gpongrac 02/20/97 - add compltion_time to bdf -- swerthei 01/14/97 - add setclonename -- ### comments from 1996 removed type version_list_type is table of varchar2(11) index by binary_integer; version_list version_list_type; version_max_index binary_integer; version_counter binary_integer := 1; /*-----------* * Constants * *-----------*/ MAXNUMVAL CONSTANT NUMBER := 2**32-1; catalogVersion CONSTANT VARCHAR2(11) := '11.02.00.03'; /*-------------------------* * Package State Variables * *-------------------------*/ -- Package State Variables: -- debug -- Controls whether deb() sends debug info back to RMAN. -- this_db_key -- This is the primary key of the db record for the target database we -- are dealing with. It is set as a result of calling registerDatabase, -- resetDatabase or setDatabase. -- this_dbinc_key -- This is the primary key of the dbinc for the incarnation of the target -- database we are dealing with. It is set as a result of calling -- registerDatabase, resetDatabase or setDatabase. -- this_ckp_key -- This is the primary key of the current recovery catalog checkpoint -- or 0 for partial checkpoint. -- It is set when beginCkpt is called, and cleared to null when endCkpt -- is called. -- last_* -- Used to ensure that records are passed to check* procedures -- in ascending order. -- -- see Cursor Row Variables section for more state variables debug BOOLEAN := FALSE; this_ckp_key NUMBER := NULL; this_ckp_scn NUMBER := NULL; this_ckp_time DATE := NULL; last_full_ckp_scn NUMBER := NULL; last_ts# NUMBER; last_file# NUMBER; last_thread# NUMBER; last_fname site_dfatt.fname%type; last_ts_recid NUMBER; last_df_recid NUMBER; last_tf_recid NUMBER; last_rt_recid NUMBER; last_orl_recid NUMBER; last_conf_recid NUMBER; force_resync2cf VARCHAR2(3) := 'NO'; last_rlh_recid NUMBER; last_al_recid NUMBER; last_offr_recid NUMBER; last_bs_recid NUMBER; last_bp_recid NUMBER; last_bdf_recid NUMBER; last_bsf_recid NUMBER; last_brl_recid NUMBER; last_cdf_recid NUMBER; last_bcb_recid NUMBER; last_ccb_recid NUMBER; last_do_recid NUMBER; last_xdf_recid NUMBER := NULL; last_xal_recid NUMBER := NULL; last_rsr_recid NUMBER; last_rout_stamp NUMBER := NULL; last_inst_startup_stamp NUMBER := NULL; lrsr_key NUMBER; lrout_skey NUMBER; lsession_recid NUMBER; lsession_stamp NUMBER; lrman_status_recid NUMBER; lrman_status_stamp NUMBER; -- 5906892 krbmror_llength_bytes NUMBER := 130; -- bug 10143694: cache rout rows and do bulk insert into this table to -- improve performance. type rout_list is table of rout%ROWTYPE index by binary_integer; lrout_table rout_list; lrout_curridx binary_integer := 0; -- last_ic_recid contains a non-NULL value if incarnation records are -- resynced using recids.
last_ic_recid NUMBER := NULL; scr_key NUMBER := NULL; scr_line NUMBER; scr_glob BOOLEAN; kccdivts NUMBER; type bskeys is table of number index by binary_integer; cntbs NUMBER := 0; updatebs bskeys; last_reset_scn NUMBER; last_reset_time DATE; last_dbinc_key NUMBER; do_temp_ts_resync BOOLEAN := FALSE; -- indicates if temp_ts is resynced last_cf_version_time DATE; dbglvl NUMBER := RCVCAT_LEVEL_DEFAULT; low_nrsp_recid NUMBER; last_nrsp_recid NUMBER; last_grsp_recid NUMBER; last_rspname grsp.rspname%type; last_bcr_recid NUMBER; last_resync_cksum NUMBER; -- -- NOTE :: this_cf_type usage -- -- Starting with the 11g catalog schema, we keep track of high water marks for primary -- and standby databases if the client supplies a non-null db_unique_name. The reason -- we cannot keep track of water marks for a standby when db_unique_name is -- null (when a 9iR2 RMAN client is connected) is that there is only one row in the NODE table -- with a NULL db_unique_name value. And we don't want to change the behavior of -- optimizing resync at the primary in this case. If we updated water marks every -- time a new standby control file or primary control file was seen, it -- could cause a resync of all records from primary and standby if RMAN is -- connected to the primary and standby alternately. Instead of paying that penalty, -- we just don't keep track of water marks for standby control files. -- -- This variable MUST be used in all circular record beginresync/endresync -- code to return 0 as recid and not update the high water mark if we cannot -- keep track of high water marks for a particular control file type. -- this_cf_type VARCHAR2(7) := NULL; -- Db unique name (once it was called service name) this_db_unique_name VARCHAR2(30) := NULL; this_site_key NUMBER; -- Never NULL even for 9i RMAN client client_site_aware boolean := FALSE; -- see prvtrmnu.sql for the semantics of the 3 variables below logs_shared number := FALSE#; -- used only when client_site_aware disk_backups_shared number := TRUE#; -- indicates shared across all sites tape_backups_shared number := TRUE#; -- indicates shared across all sites reNorm_state binary_integer; RENORM_DFATT CONSTANT binary_integer := 1; RENORM_ORL CONSTANT binary_integer := 2; RENORM_AL CONSTANT binary_integer := 3; RENORM_BP CONSTANT binary_integer := 4; RENORM_CCF CONSTANT binary_integer := 5; RENORM_CDF CONSTANT binary_integer := 6; RENORM_TFATT CONSTANT binary_integer := 7; -- data_type to track session circular section water marks. When the control file -- is a backup, we try to resync all records starting from the mined resync -- timestamp. Even if mining fails, we pay the cost of that resync -- only once in the session. Thereafter, this sessionWaterMark maintains -- the watermarks for the session, which makes subsequent resyncs -- faster.
type sessionWaterMarks_t is record ( last_kccdivts number := 0, -- check if ctl version is diff high_rout_stamp number := 0, -- for rman_output resync high_ic_recid number := 0, -- incarnation recid high_offr_recid number := 0, -- offline range (kkor) recid high_rlh_recid number := 0, -- log history (kcclh) recid high_al_recid number := 0, -- archived log (kccal) recid high_bs_recid number := 0, -- backup set (kccbs) recid high_bp_recid number := 0, -- backup piece (kccbp) recid high_bdf_recid number := 0, -- backup datafile (kccbf) recid high_cdf_recid number := 0, -- datafile copy (kccdc) recid high_brl_recid number := 0, -- backup redo log (kccbl) recid high_bcb_recid number := 0, -- backup datafile corruption recid high_ccb_recid number := 0, -- datafile copy corruption recid high_do_recid number := 0, -- deleted object recid high_pc_recid number := 0, -- proxy copy (kccpc) recid high_bsf_recid number := 0, -- backup SPFILE (kccbi) recid high_rsr_recid number := 0, -- RMAN status (kccrsr) recid high_nrsp_recid number := 0, -- normal restore point recid high_bcr_recid number := 0 -- high blk crpt (kccblkcor) recid ); init_sessionWaterMarks sessionWaterMarks_t; prev_sessionWaterMarks sessionWaterMarks_t; sessionWaterMarks sessionWaterMarks_t; -- data_types used in import catalog implementation type ts_name_list is table of ts.ts_name%type index by binary_integer; type numTab_t is table of number index by binary_integer; type key_columns_list is table of varchar2(30); -- variables for import catalog implementation -- -- list of database id that will be imported. import_dbid numTab_t; -- -- If you have a new column name that is generated by rman_seq, then -- add your new column here so that IMPORT CATALOG will know to -- increment that column value. -- -- Key column names that are used in rman catalog schema. They should -- end with key. These values must be generated by rman_seq. -- key_columns CONSTANT key_columns_list := key_columns_list ('DB_KEY' , 'DBINC_KEY' , 'CURR_DBINC_KEY' , 'PARENT_DBINC_KEY', 'CKP_KEY' , 'START_CKP_KEY' , 'END_CKP_KEY' , 'OFFR_KEY' , 'RR_KEY' , 'RLH_KEY' , 'AL_KEY' , 'BS_KEY' , 'BP_KEY' , 'BCF_KEY' , 'CCF_KEY' , 'XCF_KEY' , 'BSF_KEY' , 'BDF_KEY' , 'CDF_KEY' , 'XDF_KEY' , 'XAL_KEY' , 'BRL_KEY' , 'BDF_KEY' , 'RSR_KEY' , 'RSR_PKEY' , 'RSR_L0KEY' , 'SCR_KEY' , 'ROUT_SKEY' , 'SITE_KEY' , 'DF_KEY' , 'TF_KEY'); -- Global variables that represent dblink of the source recovery catalog -- and the offset at which the key columns has to be incremented import_dblink tempres.name%type; import_offset number; /*---------* * Cursors * *---------*/ -- Package Cursors: -- tsQ -- Used to resync the list of tablespaces. -- dfQ -- Used to resync the list of datafiles. -- tfQ -- Used to resync the list of tempfiles. -- rtQ -- Used to resync the list of threads. -- orlQ -- Used to resync the list of online redo logs. -- grspQ -- Used to resync the list of guaranteed restore point. -- scrlQ -- Used to fetch lines from a stored script. 
-- select all current tablespaces in this database incarnation cursor tsQ IS SELECT ts.ts_name, ts.ts#, ts.create_scn, ts.create_time, tsatt.rbs_count, ts.included_in_database_backup, ts.bigfile, ts.temporary, ts.encrypt_in_backup, ts.plugin_scn FROM ts, tsatt WHERE ts.dbinc_key = tsatt.dbinc_key AND ts.ts# = tsatt.ts# AND ts.create_scn = tsatt.create_scn AND ts.dbinc_key = this_dbinc_key AND ts.plugin_scn = tsatt.plugin_scn AND ts.drop_scn IS NULL -- skip ones we know were dropped AND tsatt.end_ckp_key IS NULL ORDER BY ts.ts#; -- client passes rows to checkTs in -- ascending ts# order. We can detect -- new or dropped tablespaces this way. -- select all datafiles in this database incarnation at this site cursor dfQ IS SELECT df.file#, df.create_scn, df.create_time, df.plugin_scn, df.ts#, site_dfatt.fname, df.blocks, df.clone_fname, df.stop_scn, df.read_only, df.plugged_readonly, df.create_thread, df.create_size, df.foreign_dbid FROM df, site_dfatt WHERE df.dbinc_key = this_dbinc_key -- our dbinc please AND df.drop_scn IS NULL -- df not dropped AND this_site_key = site_dfatt.site_key(+) -- select names for the site AND df.df_key = site_dfatt.df_key(+) -- join site_dfatt to df ORDER BY df.file#; -- client passes rows to checkDf in -- ascending file# order. We can detect -- new datafiles this way -- select all tempfiles in this database incarnation cursor tfQ IS SELECT tf.file#, tf.create_scn, tf.create_time, tf.ts#, site_tfatt.fname, site_tfatt.blocks, site_tfatt.autoextend, site_tfatt.max_size, site_tfatt.next_size, tf.tf_key tf_key FROM tf, site_tfatt WHERE tf.dbinc_key = this_dbinc_key -- our dbinc please AND this_site_key = site_tfatt.site_key -- select names for the site AND tf.tf_key = site_tfatt.tf_key -- join site_tfatt to tf AND site_tfatt.drop_scn IS NULL -- tf not dropped ORDER BY tf.file#; -- client passes rows to checkTf in -- ascending file# order. We can detect -- new tempfiles this way -- select all redo threads in this database incarnation cursor rtQ IS SELECT rt.thread#, rt.sequence#, rt.enable_scn, rt.enable_time, rt.status FROM rt WHERE rt.dbinc_key = this_dbinc_key ORDER BY rt.thread#; -- select all online redo logs in this database incarnation cursor orlQ IS SELECT orl.thread#, orl.group#, orl.fname FROM orl WHERE orl.dbinc_key = this_dbinc_key AND orl.site_key = this_site_key ORDER BY nlssort(orl.fname, 'NLS_COMP=ANSI NLS_SORT=ASCII7'); -- bug 2107554 -- select all guaranteed and preserved restore points cursor grspQ IS SELECT grsp.rspname, grsp.from_scn, grsp.to_scn FROM grsp, dbinc WHERE grsp.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key AND grsp.site_key = this_site_key ORDER BY nlssort(grsp.rspname, 'NLS_COMP=ANSI NLS_SORT=ASCII7'); -- select all undeleted pieces that match the device_type and handle -- as per backup sharing attributes. 
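-- For example (illustrative only): for device_type 'SBT_TAPE' and handle
-- '/backup/piece_124_1.bkp', the hashkey compared below is
--   substr('SBT_TAPE',1,10) || substr('/backup/piece_124_1.bkp',1,10) ||
--   substr('/backup/piece_124_1.bkp',-10)
--   = 'SBT_TAPE' || '/backup/pi' || '_124_1.bkp'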
cursor bpq(device_type VARCHAR2, handle VARCHAR2,
           bp_recid VARCHAR2, bp_stamp VARCHAR2) IS
   SELECT bp_key, bs_key
     FROM bp
    WHERE db_key = this_db_key
      AND device_type = bpq.device_type
      AND ((disk_backups_shared = TRUE# AND bp.device_type =  'DISK') OR
           (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
           (this_site_key = nvl(bp.site_key, this_site_key)))
      AND handle = bpq.handle
      AND handle_hashkey = substr(bpq.device_type,1,10) ||
                           substr(bpq.handle,1,10) || substr(bpq.handle,-10)
      AND NOT (bp_recid = bpq.bp_recid AND bp_stamp = bpq.bp_stamp);

-- select all lines from a stored script
cursor scrlQ(key NUMBER) IS
   SELECT text FROM scrl
    WHERE scr_key = key
    ORDER BY linenum;

-- Get all the recovery catalog versions
cursor rcverQ IS
   SELECT version FROM rcver ORDER BY version;

cursor reNorm_dfatt_c IS
   SELECT fname FROM site_dfatt
    WHERE df_key in (select df_key from df, dbinc
                      where df.dbinc_key = dbinc.dbinc_key
                        and dbinc.db_key = this_db_key)
      FOR UPDATE;

cursor reNorm_orl_c IS
   SELECT fname FROM orl
    WHERE dbinc_key in (select dbinc_key from dbinc
                         where db_key = this_db_key)
      FOR UPDATE;

cursor reNorm_al_c IS
   SELECT fname FROM al
    where dbinc_key in (select dbinc_key from dbinc
                         where db_key = this_db_key)
      FOR UPDATE;

cursor reNorm_bp_c IS
   SELECT handle FROM bp
    WHERE device_type = 'DISK' and db_key = this_db_key
      FOR UPDATE;

cursor reNorm_ccf_c IS
   SELECT fname FROM ccf
    WHERE dbinc_key in (select dbinc_key from dbinc
                         where db_key = this_db_key)
      FOR UPDATE;

cursor reNorm_cdf_c IS
   SELECT fname FROM cdf
    WHERE dbinc_key in (select dbinc_key from dbinc
                         where db_key = this_db_key)
      FOR UPDATE;

cursor reNorm_tfatt_c IS
   SELECT fname FROM site_tfatt
    WHERE tf_key in (select tf_key from tf, dbinc
                      where tf.dbinc_key = dbinc.dbinc_key
                        and dbinc.db_key = this_db_key)
      FOR UPDATE;

cursor lscrnames_c(glob number, allnames number) IS
   select 1 oby, rdbi.db_name dname, s.scr_name sname,
          s.scr_comment scomm
     from db rdb, dbinc rdbi, scr s
    where lscrnames_c.glob is null
      and lscrnames_c.allnames is null
      and rdbi.dbinc_key = rdb.curr_dbinc_key
      and rdb.db_key = s.db_key
      and s.db_key = this_db_key
      and s.db_key is not NULL
   UNION ALL
   select 2, 'ORA%GLOB', s.scr_name, s.scr_comment
     from scr s
    where s.db_key IS NULL
   UNION ALL
   select 3, rdbi.db_name, s.scr_name, s.scr_comment
     from db rdb, dbinc rdbi, scr s
    where lscrnames_c.glob is null
      and lscrnames_c.allnames is not null
      and rdbi.dbinc_key = rdb.curr_dbinc_key
      and rdb.db_key = s.db_key
      and s.db_key is not NULL
    order by 1 asc, 2 asc, 3 asc;

/*----------------------*
 * Cursor Row Variables *
 *----------------------*/

-- Cursor Row Variables:
--   tsRec
--     Holds 1 row from tsQ. tsRec.ts# is null when not doing a tablespace
--     resync. tsRec.ts# is set to MAXNUMVAL when tsQ reaches end-of-fetch.
--   dfRec
--     Holds 1 row from dfQ. dfRec.file# is null when not doing a datafile
--     resync. dfRec.file# is set to MAXNUMVAL when dfQ reaches end-of-fetch.
--   tfRec
--     Holds 1 row from tfQ. tfRec.file# is null when not doing a tempfile
--     resync. tfRec.file# is set to MAXNUMVAL when tfQ reaches end-of-fetch.
--   rtRec
--     Holds 1 row from rtQ. rtRec.thread# is null when not doing a redo
--     thread resync. rtRec.thread# is set to MAXNUMVAL when rtQ reaches
--     end-of-fetch.
--   orlRec
--     Holds 1 row from orlQ. orlRec.fname is null when not doing an online
--     redo log resync. orlRec.fname is set to chr(255) when orlQ reaches
--     end-of-fetch.
--   grspRec
--     Holds 1 row from grspQ. grspRec.rspname is null when not doing a
--     guaranteed restore point resync.
--     grspRec.rspname is set to chr(255) when grspQ reaches end-of-fetch.

tsRec   tsQ%rowtype;
dfRec   dfQ%rowtype;
tfRec   tfQ%rowtype;
rtRec   rtQ%rowtype;
orlRec  orlQ%rowtype;
grspRec grspQ%rowtype;

/*---------------*
 * Private Types *
 *---------------*/

/*-------------------*
 * Private functions *
 *-------------------*/

PROCEDURE setDebugOn(dbglevel IN NUMBER DEFAULT RCVCAT_LEVEL_DEFAULT) IS
BEGIN
   --
   -- Passing buffer_size as null is an undocumented way to buffer an
   -- unlimited number of rows. PL/SQL storage is the limit.
   --
   dbms_output.enable(buffer_size => null);
   debug := TRUE;
   dbglvl := dbglevel;
END;

PROCEDURE setDebugOff IS
BEGIN
   dumpPkgState('Debug off');
   dbms_output.disable;               -- free memory
   debug := FALSE;
END;

PROCEDURE deb(line  IN varchar2
             ,level IN number DEFAULT RCVCAT_LEVEL_DEFAULT) IS
   buffer_overflow exception;
BEGIN
   if debOK(level) then
      dbms_output.put_line('DBGRCVCAT: '||line);
   end if;
EXCEPTION
   WHEN others THEN
      dbms_output.put_line('caught exception during deb ' ||
                           substr(sqlerrm, 1, 512));
END deb;

FUNCTION debOK(level IN number DEFAULT RCVCAT_LEVEL_DEFAULT)
   RETURN boolean IS
BEGIN
   return (debug and dbglvl >= level);
END debOK;

-- Ensure that the resync call can go ahead - i.e. beginCkpt and setDatabase
-- have been called.
PROCEDURE checkResync IS
BEGIN
   IF (this_ckp_key IS NULL) THEN
      raise_application_error(-20031, 'Resync not started');
   END IF;
   IF (this_db_key IS NULL) THEN
      raise_application_error(-20021, 'Database not set');
   END IF;
   IF (this_dbinc_key IS NULL) THEN
      raise_application_error(-20020, 'Database incarnation not set');
   END IF;
   IF (this_site_key IS NULL) THEN
      raise_application_error(-20199, 'Database site key not set');
   END IF;
END checkResync;

-- Note: this is a copy of a function in recover.txt.
-- For example, 15-JUN-1990 12:30:45 maps to
-- ((((2*12+5)*31+14)*24+12)*60+30)*60+45 = 78928245.
function date2stamp(dt IN date) return number is
   stamp number;
begin
   stamp := (((((to_number(to_char(dt, 'YYYY'))-1988)*12 +
                (to_number(to_char(dt, 'MM'))-1))*31 +
               (to_number(to_char(dt, 'DD'))-1))*24 +
              (to_number(to_char(dt, 'HH24'))))*60 +
             (to_number(to_char(dt, 'MI'))))*60 +
            (to_number(to_char(dt, 'SS')));
   return stamp;
end;

-- Note: this is a copy of a function in recover.txt
function stamp2date(stamp IN number) return date IS
   x  number;
   dt varchar2(19);
begin
   x := stamp;
   dt := to_char(mod(x,60), 'FM09');                    -- seconds
   x := floor(x/60);
   dt := to_char(mod(x,60), 'FM09') || ':' || dt;       -- minutes
   x := floor(x/60);
   dt := to_char(mod(x,24), 'FM09') || ':' || dt;       -- hours
   x := floor(x/24);
   dt := to_char(mod(x,31)+1, 'FM09') || ' ' || dt;     -- days
   x := floor(x/31);
   dt := to_char(mod(x,12)+1, 'FM09') || '/' || dt;     -- months
   dt := to_char(floor(x/12)+1988) || '/' || dt;
   return to_date(dt, 'YYYY/MM/DD HH24:MI:SS');
end;

-- recompute incarnation status for all incarnations from dbinc_key
PROCEDURE recomputeDbincStatus(db_key IN NUMBER, dbinc_key IN NUMBER) IS
   -- Recursively calls itself to set status in its parent incarnation.
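   -- Illustrative effect (keys hypothetical): for an incarnation chain
   -- 1 -> 2 -> 3 where 3 is made current, the db row is pointed at 3,
   -- incarnation 3 is marked 'CURRENT', its ancestors 2 and 1 are marked
   -- 'PARENT', and every incarnation off the chain is left 'ORPHAN'.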
   PROCEDURE updateDbincStatus(db_key IN NUMBER, dbinc_key IN NUMBER) IS
      parent_key NUMBER;
   BEGIN
      BEGIN
         deb('updateDbincStatus - for db_key='||db_key||
             ' dbinc='||dbinc_key);
         update dbinc set dbinc_status='PARENT'
          where dbinc_key = updateDbincStatus.dbinc_key
            and db_key = updateDbincStatus.db_key;
         -- find parent and then set its status
         select parent_dbinc_key into parent_key from dbinc
          where dbinc_key = updateDbincStatus.dbinc_key
            and db_key = updateDbincStatus.db_key;
         updateDbincStatus(db_key, parent_key);
         deb('updateDbincStatus - normal return for dbinc=' || dbinc_key);
      EXCEPTION
         WHEN no_data_found THEN
            deb('updateDbincStatus - Last parent is ' || dbinc_key);
            IF (dbinc_key is NOT NULL) THEN
               -- set last incarnation in the chain
               update dbinc set dbinc_status='PARENT'
                where dbinc_key = updateDbincStatus.dbinc_key
                  and db_key = updateDbincStatus.db_key;
            END IF;
            return;                   -- reached the last known parent
         WHEN OTHERS THEN
            deb('updateDbincStatus - rollback all, release locks');
            rollback;
            RAISE;
      END;
   END updateDbincStatus;
BEGIN
   -- make the given incarnation of the database the current one
   UPDATE db SET curr_dbinc_key = recomputeDbincStatus.dbinc_key
    WHERE db_key = recomputeDbincStatus.db_key;
   UPDATE dbinc SET dbinc_status='ORPHAN'
    WHERE dbinc.db_key = recomputeDbincStatus.db_key;
   updateDbincStatus(db_key, dbinc_key);
   UPDATE dbinc SET dbinc_status='CURRENT'
    where dbinc_key = recomputeDbincStatus.dbinc_key
      and db_key = recomputeDbincStatus.db_key;
END recomputeDbincStatus;

/*-------------------*
 * Register Database *
 *-------------------*/

PROCEDURE registerDatabase(db_id      IN NUMBER
                          ,db_name    IN VARCHAR2
                          ,reset_scn  IN NUMBER
                          ,reset_time IN DATE
                          ) IS
   local dbinc%rowtype;               -- local variables
BEGIN
   -- verify that this package is compatible with the recovery catalog
   BEGIN
      SELECT NULL INTO local.db_key FROM rcver
       WHERE version = catalogVersion;
   EXCEPTION
      WHEN no_data_found THEN
         raise_application_error(-20298,
                                 'Not compatible with recovery catalog');
   END;
   IF (this_ckp_key IS NOT NULL) THEN
      raise_application_error(-20030, 'Resync in progress');
   END IF;
   this_db_key := NULL;
   this_dbinc_key := NULL;
   BEGIN
      INSERT INTO db(db_key, db_id) VALUES(rman_seq.nextval, db_id);
   EXCEPTION
      WHEN dup_val_on_index THEN
         raise_application_error(-20002, 'Database already registered');
   END;
   SELECT rman_seq.currval INTO local.db_key FROM dual;
   INSERT INTO dbinc (dbinc_key, db_key, db_name, reset_scn, reset_time)
   VALUES (rman_seq.nextval, local.db_key, upper(db_name), reset_scn,
           reset_time);
   SELECT rman_seq.currval INTO local.dbinc_key FROM dual;
   -- make it current
   recomputeDbincStatus(local.db_key, local.dbinc_key);
   deb('registerDatabase - adding a row to site table, '||
       'with null db_unique_name');
   INSERT INTO node(db_key, force_resync2cf, database_role, site_key)
   VALUES(local.db_key, 'NO', 'PRIMARY', rman_seq.nextval);
   setReason(RESYNC_REASON_NOACTION);
   deb('registerDatabase - commit, release locks');
   commit;
-- rollback on error
EXCEPTION
   WHEN OTHERS THEN
      deb('registerDatabase - rollback, released all locks');
      rollback;
      RAISE;
END registerDatabase;

-- register a new database incarnation and make it the current incarnation
-- after opening the database with resetlogs option
PROCEDURE resetDatabase(db_id             IN NUMBER
                       ,db_name           IN VARCHAR2
                       ,reset_scn         IN NUMBER
                       ,reset_time        IN DATE
                       ,parent_reset_scn  IN NUMBER
                       ,parent_reset_time IN DATE
                       ) IS
   local dbinc%rowtype;               -- local variables
BEGIN
   -- verify that this package is compatible with the recovery catalog
   BEGIN
      SELECT NULL INTO local.db_key FROM rcver
       WHERE version =
catalogVersion; EXCEPTION WHEN no_data_found THEN raise_application_error(-20298, 'Not compatible with recovery catalog'); END; IF (this_ckp_key IS NOT NULL) THEN raise_application_error(-20030, 'Resync in progress'); END IF; IF (db_id IS NULL) THEN raise_application_error(-20007, 'db_id is null'); END IF; this_db_key := NULL; this_dbinc_key := NULL; BEGIN SELECT db_key, curr_dbinc_key INTO local.db_key, local.dbinc_key FROM db WHERE db.db_id = resetDatabase.db_id; -- should return 1 row EXCEPTION WHEN no_data_found THEN raise_application_error(-20001, 'Database not found'); END; -- find the parent of the new incarnation BEGIN SELECT dbinc_key INTO local.parent_dbinc_key FROM dbinc WHERE dbinc.db_key = local.db_key AND dbinc.reset_scn = resetDatabase.parent_reset_scn AND dbinc.reset_time = resetDatabase.parent_reset_time; EXCEPTION WHEN no_data_found THEN local.parent_dbinc_key := NULL; END; -- insert the new incarnation BEGIN INSERT INTO dbinc (dbinc_key, db_key, db_name, reset_scn, reset_time, parent_dbinc_key) VALUES (rman_seq.nextval, local.db_key, upper(db_name), reset_scn, reset_time, local.parent_dbinc_key); EXCEPTION WHEN dup_val_on_index THEN raise_application_error(-20009, 'Db incarnation already registered'); END; SELECT rman_seq.currval INTO local.dbinc_key FROM dual; -- recompute chain of incarnation, and make it current recomputeDbincStatus(local.db_key, local.dbinc_key); deb('resetDatabase - commit, release locks'); commit; -- rollback on error EXCEPTION WHEN OTHERS THEN deb('resetDatabase - rollback, released all locks'); rollback; RAISE; END resetDatabase; -- reset database to specified incarnation. FUNCTION resetDatabase(db_id IN NUMBER ,db_name IN VARCHAR2 ,reset_scn IN NUMBER ,reset_time IN DATE ,parent_reset_scn IN NUMBER ,parent_reset_time IN DATE ) RETURN NUMBER IS local dbinc%rowtype; -- local variables BEGIN -- verify that this package is compatible with the recovery catalog BEGIN SELECT NULL INTO local.db_key FROM rcver WHERE version = catalogVersion; EXCEPTION WHEN no_data_found THEN raise_application_error(-20298, 'Not compatible with recovery catalog'); END; IF (this_ckp_key IS NOT NULL) THEN raise_application_error(-20030, 'Resync in progress'); END IF; IF (db_id IS NULL) THEN raise_application_error(-20007, 'db_id is null'); END IF; BEGIN SELECT db_key INTO local.db_key FROM db WHERE db.db_id = resetDatabase.db_id; -- should return 1 row EXCEPTION WHEN no_data_found THEN raise_application_error(-20001, 'Database not found'); END; SELECT dbinc_key INTO local.dbinc_key FROM dbinc WHERE dbinc.db_key = local.db_key AND dbinc.reset_scn = resetDatabase.reset_scn AND dbinc.reset_time = resetDatabase.reset_time; resetDatabase(local.dbinc_key, db_name); RETURN local.dbinc_key; END resetDatabase; -- make an existing database incarnation the current incarnation PROCEDURE resetDatabase( dbinc_key IN NUMBER ,db_name IN VARCHAR2 ) IS local dbinc%rowtype; -- local variables BEGIN -- verify that this package is compatible with the recovery catalog BEGIN SELECT NULL INTO local.db_key FROM rcver WHERE version = catalogVersion; EXCEPTION WHEN no_data_found THEN raise_application_error(-20298, 'Not compatible with recovery catalog'); END; IF (this_ckp_key IS NOT NULL) THEN raise_application_error(-20030, 'Resync in progress'); END IF; IF (dbinc_key IS NULL) THEN raise_application_error(-20008, 'Database incarnation key is missing'); END IF; this_db_key := NULL; this_dbinc_key := NULL; BEGIN SELECT db_key, db_name INTO local.db_key, local.db_name FROM dbinc WHERE dbinc.dbinc_key 
= resetDatabase.dbinc_key; EXCEPTION WHEN no_data_found THEN raise_application_error(-20010, 'Database incarnation not found'); END; IF (upper(db_name) <> local.db_name OR db_name IS NULL) THEN raise_application_error(-20004, 'Database name does not match'); END IF; -- recompute chain of incarnation recomputeDbincStatus(local.db_key, resetDatabase.dbinc_key); deb('resetDatabase - commit, release locks'); commit; -- rollback on error EXCEPTION WHEN OTHERS THEN deb('resetDatabase - rollback, released all locks'); rollback; RAISE; END resetDatabase; procedure resetDatabase( dbinc_key IN number ,db_name IN varchar2 ,reset_scn OUT number ,reset_time OUT date ,db_id IN number DEFAULT NULL ) IS local_db_key dbinc.db_key%TYPE; BEGIN IF db_id IS NOT NULL THEN BEGIN SELECT db_key INTO local_db_key FROM db WHERE db.db_id = resetDatabase.db_id; -- should return 1 row EXCEPTION WHEN no_data_found THEN raise_application_error(-20001, 'Database not found'); END; ELSE local_db_key := this_db_key; END IF; BEGIN SELECT reset_scn, reset_time INTO resetDatabase.reset_scn, resetDatabase.reset_time FROM dbinc WHERE dbinc.dbinc_key = resetDatabase.dbinc_key AND (db_id IS NULL OR dbinc.db_key = local_db_key); EXCEPTION WHEN no_data_found THEN raise_application_error(-20010, 'Database incarnation not found'); END; resetDatabase(dbinc_key, db_name); END resetDatabase; PROCEDURE unRegisterDatabase( db_key IN NUMBER DEFAULT NULL ,db_id IN NUMBER ) IS tmp NUMBER; BEGIN IF (this_ckp_key IS NOT NULL) THEN raise_application_error(-20030, 'Resync in progress'); END IF; -- check if the database exists in rcvcat BEGIN SELECT 0 INTO tmp FROM db WHERE db.db_id = unRegisterDatabase.db_id; EXCEPTION WHEN no_data_found THEN raise_application_error(-20001, 'Database not found'); END; DELETE FROM db WHERE db.db_id = unRegisterDatabase.db_id; deb('unRegisterDatabase - commit, release locks'); commit; -- rollback on error EXCEPTION WHEN OTHERS THEN deb('unregisterDatabase - rollback, released all locks'); rollback; RAISE; END unRegisterDatabase; -- set Archive log file sharing scope attributes for the session PROCEDURE setArchiveFileScopeAttributes(logs_shared IN NUMBER) IS BEGIN deb('setArchiveFileScopeAttributes'); IF logs_shared > 0 THEN dbms_rcvcat.logs_shared := TRUE#; ELSE dbms_rcvcat.logs_shared := FALSE#; END IF; deb('logs_shared = ' || dbms_rcvcat.logs_shared); dbms_rcvman.setArchiveFileScopeAttributes(logs_shared); deb('exiting setArchiveFileScopeAttributes'); END setArchiveFileScopeAttributes; -- set Backup file sharing scope attributes for the session PROCEDURE setBackupFileScopeAttributes( disk_backups_shared IN NUMBER, tape_backups_shared IN NUMBER) IS lsite_key NUMBER; BEGIN deb('setBackupFileScopeAttributes'); IF disk_backups_shared IS NOT NULL THEN IF disk_backups_shared > 0 THEN dbms_rcvcat.disk_backups_shared := TRUE#; ELSE dbms_rcvcat.disk_backups_shared := FALSE#; END IF; END IF; IF tape_backups_shared IS NOT NULL THEN IF tape_backups_shared > 0 THEN dbms_rcvcat.tape_backups_shared := TRUE#; ELSE dbms_rcvcat.tape_backups_shared := FALSE#; END IF; END IF; deb('disk_backups_shared='||dbms_rcvcat.disk_backups_shared); deb('tape_backups_shared='||dbms_rcvcat.tape_backups_shared); dbms_rcvman.setBackupFileScopeAttributes(disk_backups_shared, tape_backups_shared); deb('exiting setBackupFileScopeAttributes'); END setBackupFileScopeAttributes; /*--------------* * Set Database * *--------------*/ -- This procedure tells the package what target database we are working with. 
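-- When the client is site-aware, setDatabase also initializes the session
-- file-sharing scope; the effect is equivalent to the calls it makes below:
--   setArchiveFileScopeAttributes(logs_shared => 0);
--   setBackupFileScopeAttributes(disk_backups_shared => 0,
--                                tape_backups_shared => 1);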
PROCEDURE setDatabase(db_name        IN VARCHAR2
                     ,reset_scn      IN NUMBER
                     ,reset_time     IN DATE
                     ,db_id          IN NUMBER
                     ,db_unique_name IN VARCHAR2
                     ,dummy_instance IN BOOLEAN
                     ,cf_type        IN NUMBER
                     ,site_aware     IN BOOLEAN default FALSE) IS
   local            dbinc%rowtype;    -- local variables
   current_inc      VARCHAR2(3);
   dbnm             dbinc.db_name%TYPE;
   dbnm_in          dbinc.db_name%TYPE;
   rid              varchar2(18);
   local_site_key   number;
   dbunqnm          node.db_unique_name%TYPE;
   db_role          node.database_role%type;
   dbunqnm_in       node.db_unique_name%TYPE;
   cat_version      varchar2(12);
   vpd_version      varchar2(12);
   prim_dbunqnm_in  node.db_unique_name%TYPE;
   db_id_in         number;
   tmp_dbunqnm_cnt  number;
   tmp_primary_cnt  number;
BEGIN
   -- verify that this package is compatible with the recovery catalog
   BEGIN
      SELECT NULL INTO local.db_key FROM rcver
       WHERE version = catalogVersion;
   EXCEPTION
      WHEN no_data_found THEN
         raise_application_error(-20298,
                                 'Not compatible with recovery catalog');
   END;
   IF (this_ckp_key IS NOT NULL) THEN
      raise_application_error(-20030, 'Resync in progress');
   END IF;
   -- If using a virtual private catalog, and the base catalog has been
   -- upgraded, then upgrade the private catalog.
   IF user <> dbms_catowner THEN
      BEGIN
         SELECT max(version) INTO cat_version FROM rcver;
         SELECT version INTO vpd_version FROM vpc_users;
         IF cat_version <> vpd_version OR vpd_version IS NULL THEN
            create_virtual_catalog;
         END IF;
      EXCEPTION
         WHEN OTHERS THEN
            raise_application_error(-20013,
                     'Error upgrading virtual private catalog', true);
      END;
   END IF;
   this_db_key := NULL;               -- clear in case exception raised
   this_dbinc_key := NULL;
   dbnm_in := upper(db_name);
   dbunqnm_in := upper(db_unique_name);
   db_id_in := db_id;
<<now_try_with_dbid>>
   -- If the target database is mounted, then we have the db_id (kccfhdbi).
   -- This can be used to find the row in the db table corresponding
   -- to the target database, and it will indicate which incarnation
   -- is currently considered the current one.
   IF (db_id_in IS NOT NULL) THEN
      BEGIN
         SELECT db_key, curr_dbinc_key, db_name
           INTO local.db_key, local.dbinc_key, local.db_name
           FROM db
          WHERE db.db_id = db_id_in;  -- should return 1 row
      EXCEPTION
         WHEN no_data_found THEN
            raise_application_error(-20001, 'Database not found');
      END;
      -- Validate SCN only if the target database is indeed mounted
      IF (dbnm_in is NOT NULL AND db_id is NOT NULL) THEN
         -- Now validate that the resetlogs SCN we were passed matches that
         -- of the current incarnation of this database. If not, then
         -- a reset database should be done, or the wrong controlfile is
         -- mounted.
         BEGIN
            SELECT decode(dbinc.dbinc_key, db.curr_dbinc_key, 'YES', 'NO'),
                   dbinc.db_name, dbinc.rowid
              INTO current_inc, dbnm, rid
              FROM db, dbinc
             WHERE db.db_key = dbinc.db_key
               AND db.db_id = setDatabase.db_id
               AND dbinc.reset_scn = setDatabase.reset_scn
               AND dbinc.reset_time = setDatabase.reset_time;
         EXCEPTION
            WHEN no_data_found THEN
               raise_application_error(-20003,
                                       'Database incarnation not found');
         END;
         IF (current_inc = 'NO') THEN
            raise_application_error(-20011,
                                    'Database incarnation not current');
         END IF;
         IF (dbnm != dbnm_in) THEN
            UPDATE dbinc SET dbinc.db_name = dbnm_in WHERE rowid = rid;
            deb('setDatabase - commit, release locks');
            COMMIT;
         END IF;
      END IF;
      IF (NOT dummy_instance AND dbunqnm_in IS NOT NULL) THEN
         deb('setDatabase - check db_unique_name= ' || dbunqnm_in ||
             ' cf_type= ' || cf_type);
         -- If we are seeing a database with a non-null db_unique_name for
         -- the first time after upgrade from 9i, set the value of the null
         -- db_unique_name to the current one and fall through.
         -- Note that there can be only one row with a null db_unique_name
         -- value due to the unique constraint on this column and dbid.
         -- Also assert the upgrade went thru as expected.
         SELECT count(*) into tmp_dbunqnm_cnt FROM node
          WHERE node.db_unique_name is NULL
            AND node.db_key = local.db_key;
         IF tmp_dbunqnm_cnt = 1 THEN
            SELECT count(*) into tmp_dbunqnm_cnt FROM node
             WHERE node.db_unique_name is not NULL
               AND node.db_key = local.db_key;
            IF tmp_dbunqnm_cnt > 0 THEN
               raise_application_error(-20999,
                  'internal error: found non-null and null site name');
            END IF;
            UPDATE NODE SET node.db_unique_name = dbunqnm_in
             WHERE node.db_unique_name is NULL
               AND node.db_key = local.db_key;
            deb('setDatabase: updating null db_unique_name with ' ||
                dbunqnm_in || ', number of rows updated ' || sql%rowcount);
         END IF;
         BEGIN
            -- change database_role if it is changed now
            SELECT node.database_role, site_key
              INTO db_role, local_site_key
              FROM node
             WHERE node.db_key = local.db_key
               AND node.db_unique_name = dbunqnm_in;
            -- count how many other databases are marked as primary other
            -- than the current one
            SELECT count(*) into tmp_primary_cnt FROM node
             WHERE node.database_role = 'PRIMARY'
               AND site_key <> local_site_key
               AND node.db_key = local.db_key;
            deb('setDatabase - check database_role');
            IF (cf_type = CF_STANDBY AND db_role != 'STANDBY') THEN
               -- controlfile is standby but not the database role
               deb('setDatabase - database role not standby - updating');
               UPDATE node SET node.database_role = 'STANDBY',
                               node.high_conf_recid = 0
                WHERE site_key = local_site_key;
               COMMIT;
            ELSIF ((cf_type = CF_CURRENT OR cf_type = CF_BACKUP) AND
                   (db_role != 'PRIMARY' OR tmp_primary_cnt > 1)) THEN
               -- controlfile is for primary but not the database role
               deb('setDatabase - not primary or primary_cnt='||
                   tmp_primary_cnt);
               -- change current primary to standby and make new as primary
               UPDATE node SET node.database_role = 'STANDBY',
                               node.high_conf_recid = 0
                WHERE site_key <> local_site_key
                  AND db_key = local.db_key;
               -- whenever we see a new primary database, resync the fixed
               -- record section to reflect the new primary database
               -- information. We only need to get the incremental changes
               -- from circular sections. Hence no need to reset those
               -- pointers.
               UPDATE node SET node.database_role = 'PRIMARY',
                               node.high_conf_recid = 0,
                               high_ic_recid = 0,
                               high_ts_recid = NULL,
                               high_df_recid = NULL,
                               high_rt_recid = NULL,
                               high_orl_recid = NULL,
                               high_tf_recid = 0
                WHERE site_key = local_site_key
                  AND db_key = local.db_key;
               sessionWaterMarks.high_ic_recid := 0;
               COMMIT;
               prev_sessionWaterMarks := sessionWaterMarks;
            END IF;
         EXCEPTION
            WHEN no_data_found THEN
               IF (cf_type = CF_CURRENT OR cf_type = CF_BACKUP) THEN
                  deb('setDatabase: found new primary database...');
                  -- change current primary to standby and make current as
                  -- primary
                  UPDATE node SET node.database_role = 'STANDBY',
                                  node.high_conf_recid = 0
                   WHERE db_key = local.db_key;
                  INSERT INTO node(db_unique_name, db_key, force_resync2cf,
                                   database_role, site_key)
                  VALUES(dbunqnm_in, local.db_key, 'NO', 'PRIMARY',
                         rman_seq.nextval);
                  COMMIT;
               ELSIF cf_type = CF_STANDBY THEN
                  -- New standby site detected
                  deb('setDatabase: found new standby database...');
                  INSERT INTO node(db_unique_name, db_key, force_resync2cf,
                                   database_role, site_key)
                  VALUES(dbunqnm_in, local.db_key, 'NO', 'STANDBY',
                         rman_seq.nextval);
                  COMMIT;
               ELSE
                  -- for all practical purposes, assume this to be the
                  -- primary site. When the control file becomes primary,
                  -- we will automatically rename the db_unique_name.
                  -- Till then use the current primary database
                  -- db_unique_name.
                  -- Note, we must find one row, otherwise the upgrade did
                  -- not work as expected for this database.
                  BEGIN
                     deb('setDatabase - faking db_unique_name from ' ||
                         dbunqnm_in);
                     SELECT db_unique_name into prim_dbunqnm_in from node
                      WHERE db_key = local.db_key
                        AND database_role = 'PRIMARY';
                     dbunqnm_in := prim_dbunqnm_in;
                     deb('setDatabase - changing dbunqnm_in to ' ||
                         dbunqnm_in);
                  EXCEPTION
                     WHEN no_data_found THEN
                        deb('setDatabase - unknown dbunqnm_in set to null');
                        dbunqnm_in := null;
                  END;
               END IF;
         END;
      END IF;
   -- if db_id is unknown, try using db_name
   ELSIF (dbnm_in IS NOT NULL) THEN
      BEGIN
         SELECT db.db_key, db.curr_dbinc_key, db.db_id
           INTO local.db_key, local.dbinc_key, db_id_in
           FROM db, dbinc
          WHERE db.curr_dbinc_key = dbinc.dbinc_key
            AND dbinc.db_name = dbnm_in;
      EXCEPTION
         WHEN no_data_found THEN
            raise_application_error(-20001, 'Database not found');
         WHEN too_many_rows THEN
            raise_application_error(-20005, 'Database name is ambiguous');
      END;
      GOTO now_try_with_dbid;
   ELSE
      raise_application_error(-20006, 'Database name is missing');
   END IF;
   -- If db_unique_name is NULL, then we know this is a pre-10g database.
   -- Or, it is 11g with a new db_unique_name in nomount state.
   this_db_unique_name := dbunqnm_in;
   this_db_key := local.db_key;
   this_dbinc_key := local.dbinc_key;
   deb('setDatabase - this_db_unique_name=' ||this_db_unique_name);
   deb('setDatabase - this_dbinc_key:'||to_char(this_dbinc_key));
   BEGIN
      select site_key into this_site_key from node
       where db_unique_name=upper(dbunqnm_in)
         AND db_key = this_db_key;
      deb('setDatabase - this_site_key:'||this_site_key);
   EXCEPTION
      WHEN no_data_found THEN
      BEGIN
         select site_key, db_unique_name into this_site_key, dbunqnm_in
           from node
          where database_role='PRIMARY'
            AND db_key = this_db_key;
         deb('setDatabase - this_site_key(primary):'||this_site_key);
      EXCEPTION
         WHEN no_data_found THEN
            -- in 11g, all sites known for db can be standby and we don't
            -- know the current primary.
            deb('setDatabase - this_site_key is null');
            this_site_key := null;
      END;
   END;
   cntbs := 0;
   -- call setDatabase from the rcvman package. Note that here we call the
   -- recovery catalog version!
   dbms_rcvman.setDatabase(dbnm_in, reset_scn, reset_time, db_id,
                           this_db_unique_name, site_aware, dummy_instance);
   client_site_aware := site_aware;
   IF client_site_aware THEN
      setArchiveFileScopeAttributes(logs_shared => 0);
      setBackupFileScopeAttributes(disk_backups_shared => 0,
                                   tape_backups_shared => 1);
   END IF;
END setDatabase;

-- Exists for compatibility
PROCEDURE setDatabase(db_name        IN VARCHAR2
                     ,reset_scn      IN NUMBER
                     ,reset_time     IN DATE
                     ,db_id          IN NUMBER
                     ,db_unique_name IN VARCHAR2 DEFAULT NULL) IS
BEGIN
   setDatabase(db_name        => db_name,
               reset_scn      => reset_scn,
               reset_time     => reset_time,
               db_id          => db_id,
               db_unique_name => db_unique_name,
               dummy_instance => FALSE,
               cf_type        => CF_CURRENT);
END setDatabase;

-- These two versions of setDatabase are not used by RMAN; they are
-- shorthand methods of invoking setDatabase when you are accessing the
-- recovery catalog schema from a tool like SQL*Plus, and you want to invoke
-- some of the stored procedures in the dbms_rcvcat or dbms_rcvman packages.
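-- For example, from SQL*Plus connected as the recovery catalog owner (the
-- dbinc_key below is illustrative):
--
--   SQL> execute dbms_rcvcat.setDatabase(12345);  -- a known dbinc_key
--   SQL> execute dbms_rcvcat.setDatabase;         -- catalog with one db
--
-- The zero-argument version works only when the catalog contains exactly
-- one registered database, since it selects curr_dbinc_key from db.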
PROCEDURE setDatabase(dbinc_key number) IS
   dbinc_row dbinc%ROWTYPE;
   db_row    db%ROWTYPE;
BEGIN
   select * into dbinc_row from dbinc
    where dbinc_key = setDatabase.dbinc_key;
   select * into db_row from db where db_key = dbinc_row.db_key;
   setDatabase(db_name    => dbinc_row.db_name,
               reset_scn  => dbinc_row.reset_scn,
               reset_time => dbinc_row.reset_time,
               db_id      => db_row.db_id);
END setDatabase;

procedure setDatabase IS
   dbinckey number;
BEGIN
   select curr_dbinc_key into dbinckey from db;
   setDatabase(dbinckey);
END setDatabase;

/*-----------------------------*
 * Recovery Catalog Checkpoint *
 *-----------------------------*/

PROCEDURE lockForCkpt IS
   local_dbid NUMBER;
   start_time DATE := sysdate;
BEGIN
   IF (this_ckp_key IS NOT NULL) THEN
      raise_application_error(-20030, 'Resync in progress');
   END IF;
   IF (this_dbinc_key IS NULL) THEN
      raise_application_error(-20020, 'Database incarnation not set');
   END IF;
   -- We need to acquire the lock on db before reading the target database
   -- cf in order to provide ckptNeeded with correct water marks.
   -- Otherwise RMAN may signal RMAN-20035 or RMAN-20033 errors when
   -- multiple resyncs are done in parallel.
   SELECT db_id INTO local_dbid FROM db
    WHERE db_key = this_db_key FOR UPDATE;
   deb('lockForCkpt - took ' || ((sysdate - start_time) * 86400) ||
       ' seconds');
   deb('lockForCkpt - Obtained all locks for db ' || to_char(this_db_key));
   -- The locks obtained here will be released by one of ckptNeeded,
   -- cancelCkpt, or endCkpt.
END lockForCkpt;

FUNCTION ckptNeeded(
  ckp_scn          IN NUMBER
 ,ckp_cf_seq       IN NUMBER
 ,cf_version       IN DATE
 ,cf_type          IN NUMBER
 ,high_df_recid    IN NUMBER
 ,high_orl_recid   IN NUMBER
 ,high_cdf_recid   IN NUMBER
 ,high_al_recid    IN NUMBER
 ,high_bp_recid    IN NUMBER
 ,high_do_recid    IN NUMBER
 ,high_offr_recid  IN NUMBER
 ,high_pc_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,high_conf_recid  IN NUMBER DEFAULT NULL -- for compatibility
 ,rltime           IN DATE   DEFAULT NULL -- for compatibility
 ,high_ts_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,high_bs_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,lopen_reset_scn  IN number DEFAULT NULL -- for compatibility
 ,lopen_reset_time IN DATE   DEFAULT NULL -- for compatibility
 ,high_ic_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,high_tf_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,high_rt_recid    IN NUMBER DEFAULT NULL -- for compatibility
 ,high_grsp_recid  IN NUMBER DEFAULT NULL -- for compatibility
 ,high_nrsp_recid  IN NUMBER DEFAULT NULL -- for compatibility
 ,high_bcr_recid   IN NUMBER DEFAULT NULL -- for compatibility
 ) RETURN NUMBER IS
   ckp_type         NUMBER;
   local            node%rowtype;
   local_dbid       NUMBER := 0;
   local_reset_time DATE;
   local_reset_scn  NUMBER := 0;
   cksum            NUMBER;
BEGIN
   IF (this_ckp_key IS NOT NULL) THEN
      raise_application_error(-20030, 'Resync in progress');
   END IF;
   IF (this_dbinc_key IS NULL) THEN
      raise_application_error(-20020, 'Database incarnation not set');
   END IF;
   IF (this_site_key IS NULL) THEN
      raise_application_error(-20199, 'Site key is not set');
   END IF;
   SELECT db_id INTO local_dbid FROM db
    WHERE db.db_key = this_db_key FOR UPDATE;
   deb('ckptNeeded - Obtained all locks for database ' ||
       to_char(this_db_key));
   -- when you add a new recid for resync, add it to this checksum too
   cksum := high_df_recid + high_orl_recid + high_cdf_recid +
            high_al_recid + high_bp_recid + high_do_recid +
            high_offr_recid + nvl(high_pc_recid, 0) +
            nvl(high_conf_recid, 0) + nvl(high_ts_recid, 0) +
            nvl(high_bs_recid, 0) + nvl(high_ic_recid, 0) +
            nvl(high_tf_recid, 0) + nvl(high_rt_recid, 0) +
            nvl(high_grsp_recid, 0) + nvl(high_nrsp_recid, 0) +
            nvl(high_bcr_recid, 0);
   -- Get the
controlfile version timestamp and high watermarks from -- the recovery catalog. Lock the dbinc record to serialize resyncs. -- Note that ckptNeeded function returns with the dbinc record locked -- if a resync is needed. The client (RMAN) must call endCkpt or -- cancelCkpt to release the lock. Use nvl so we do not need to deal -- with nulls. SELECT cf_create_time, nvl(high_df_recid,0), nvl(high_ts_recid,0), nvl(high_orl_recid,0), nvl(high_cdf_recid,0), nvl(high_al_recid,0), nvl(high_bp_recid,0), nvl(high_do_recid,0), nvl(high_offr_recid,0), nvl(high_pc_recid,0), full_ckp_cf_seq, job_ckp_cf_seq, nvl(high_ic_recid,0), nvl(high_bs_recid,0), nvl(high_tf_recid, 0), nvl(high_rt_recid, 0), nvl(high_grsp_recid, 0), nvl(high_nrsp_recid, 0), nvl(high_bcr_recid, 0), high_conf_recid, force_resync2cf INTO local.cf_create_time, local.high_df_recid, local.high_ts_recid, local.high_orl_recid, local.high_cdf_recid, local.high_al_recid, local.high_bp_recid, local.high_do_recid, local.high_offr_recid, local.high_pc_recid, local.full_ckp_cf_seq, local.job_ckp_cf_seq, local.high_ic_recid, local.high_bs_recid, local.high_tf_recid, local.high_rt_recid, local.high_grsp_recid, local.high_nrsp_recid, local.high_bcr_recid, local.high_conf_recid, local.force_resync2cf FROM node WHERE site_key = this_site_key; SELECT reset_scn, reset_time into local_reset_scn, local_reset_time FROM dbinc WHERE dbinc_key = this_dbinc_key; ckp_type := RESYNC_NONE; setReason(RESYNC_REASON_NONE); IF (rltime IS NOT NULL AND rltime != local_reset_time) THEN -- We have not yet issued a RESET DATABASE after doing RESETLOGS, -- or we are not using the latest incarnation. In either case, we -- do not want to do implicit resync now, so tell caller resync is not -- needed now. -- rltime will be NULL if called from a PRE-8.2 RMAN, in which case -- we cannot check it, nor is the check needed for PRE-8.2. deb('ckptNeeded - rltime='||to_char(rltime)|| ', local_reset_time='||to_char(local_reset_time)); ckp_type := RESYNC_NONE; GOTO ret; ELSIF (cf_version = local.cf_create_time) THEN deb('ckptNeeded - local_reset_scn='||local_reset_scn|| ' lopen_reset_scn='||lopen_reset_scn); deb('ckptNeeded - local_reset_time='||local_reset_time|| ' lopen_reset_time='||lopen_reset_time); -- The controlfile is the same as the one seen during the last resync, -- so the high watermarks can be used to determine whether a resync is -- needed. Full resync is possible only from a current controlfile, -- skip full resync checks unless the controlfile is current. IF (cf_type = CF_CURRENT AND (lopen_reset_scn IS NULL or local_reset_scn = lopen_reset_scn) AND (lopen_reset_time IS NULL or local_reset_time = lopen_reset_time)) THEN deb('ckptNeeded - high_ts_recid='||to_char(high_ts_recid)|| ', local.high_ts_recid='||to_char(local.high_ts_recid)); IF (high_ts_recid > local.high_ts_recid) THEN ckp_type := RESYNC_FULL; IF local.high_ts_recid = 0 THEN setReason(RESYNC_REASON_NOACTION); ELSE setReason(RESYNC_REASON_TS); END IF; GOTO ret; ELSIF (high_ts_recid < local.high_ts_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_df_recid='||to_char(high_df_recid)|| ', local.high_df_recid='||to_char(local.high_df_recid)); IF (high_df_recid > local.high_df_recid) THEN ckp_type := RESYNC_FULL; setReason(RESYNC_REASON_DF); GOTO ret; ELSIF (high_df_recid < local.high_df_recid) THEN -- the high recid in the controlfile should never be less than -- the one in the recovery catalog. 
         -- If we ever get here it probably means that the user made an
         -- operational error, such as restoring the controlfile from an
         -- old copy made using os utilities.
         raise_application_error(-20035, 'Invalid high recid');
      END IF;

      deb('ckptNeeded - high_tf_recid='||to_char(high_tf_recid)||
          ', local.high_tf_recid='||to_char(local.high_tf_recid));
      IF (high_tf_recid > local.high_tf_recid) THEN
         ckp_type := RESYNC_FULL;
         setReason(RESYNC_REASON_TF);
         GOTO ret;
      ELSIF (high_tf_recid < local.high_tf_recid) THEN
         raise_application_error(-20035, 'Invalid high recid');
      END IF;

      deb('ckptNeeded - high_rt_recid='||to_char(high_rt_recid)||
          ', local.high_rt_recid='||to_char(local.high_rt_recid));
      IF (high_rt_recid > local.high_rt_recid) THEN
         ckp_type := RESYNC_FULL;
         setReason(RESYNC_REASON_THR);
         GOTO ret;
      ELSIF (high_rt_recid < local.high_rt_recid) THEN
         raise_application_error(-20035, 'Invalid high recid');
      END IF;

      deb('ckptNeeded - high_orl_recid='||to_char(high_orl_recid)||
          ', local.high_orl_recid='||to_char(local.high_orl_recid));
      IF (high_orl_recid > local.high_orl_recid) THEN
         ckp_type := RESYNC_FULL;
         setReason(RESYNC_REASON_ORL);
         GOTO ret;
      ELSIF (high_orl_recid < local.high_orl_recid) THEN
         raise_application_error(-20035, 'Invalid high recid');
      END IF;

      -- We say that we need a full resync only if high_conf_recid does not
      -- match local.high_conf_recid (RC and CF have different recids), or
      -- if local.force_resync2cf is set to TRUE.
      deb('ckptNeeded - high_conf_recid='||high_conf_recid||
          ', local.high_conf_recid='||local.high_conf_recid);
      deb('             local.force_resync2cf='||local.force_resync2cf);
      IF (high_conf_recid != local.high_conf_recid OR
          local.force_resync2cf = 'YES') THEN
         ckp_type := RESYNC_FULL;
         setReason(RESYNC_REASON_CONF);
         GOTO ret;
      END IF;

      -- For a backup/standby control file we always do a partial resync,
      -- so configuration record changes are obtained. When those change,
      -- we have to trigger at least a partial resync.
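      -- To summarize the checks in this function: growth in the fixed-
      -- section recids checked above (ts, df, tf, rt, orl, conf) forces a
      -- FULL resync, while growth in the circular-section recids checked
      -- below (cdf, al, bp, bs, do, offr, pc, ic, grsp, bcr, nrsp) only
      -- ever requires a PARTIAL resync.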
deb('ckptNeeded - high_cdf_recid='||to_char(high_cdf_recid)|| ', local.high_cdf_recid='||to_char(local.high_cdf_recid)); IF (high_cdf_recid > local.high_cdf_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_cdf_recid < local.high_cdf_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_al_recid='||to_char(high_al_recid)|| ', local.high_al_recid='||to_char(local.high_al_recid)); IF (high_al_recid > local.high_al_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_al_recid < local.high_al_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_bp_recid='||to_char(high_bp_recid)|| ', local.high_bp_recid='||to_char(local.high_bp_recid)); IF (high_bp_recid > local.high_bp_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_bp_recid < local.high_bp_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_bs_recid='||to_char(high_bs_recid)|| ', local.high_bs_recid='||to_char(local.high_bs_recid)); IF (high_bs_recid > local.high_bs_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_bs_recid < local.high_bs_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_do_recid='||to_char(high_do_recid)|| ', local.high_do_recid='||to_char(local.high_do_recid)); IF (high_do_recid > local.high_do_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_do_recid < local.high_do_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_offr_recid='||to_char(high_offr_recid)|| ', local.high_offr_recid='||to_char(local.high_offr_recid)); IF (high_offr_recid > local.high_offr_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_offr_recid < local.high_offr_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_pc_recid='||to_char(high_pc_recid)|| ', local.high_pc_recid='||to_char(local.high_pc_recid)); IF (high_pc_recid > local.high_pc_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_pc_recid < local.high_pc_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded - high_ic_recid='||to_char(high_ic_recid)|| ', local.high_ic_recid='||to_char(local.high_ic_recid)); IF (high_ic_recid > local.high_ic_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_ic_recid < local.high_ic_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded: high_grsp_recid='||to_char(high_grsp_recid)|| ', local.high_grsp_recid='||to_char(local.high_grsp_recid)); IF (high_grsp_recid > local.high_grsp_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_grsp_recid < local.high_grsp_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded: high_bcr_recid='||to_char(high_bcr_recid)|| ', local.high_bcr_recid='||to_char(local.high_bcr_recid)); IF (high_bcr_recid > local.high_bcr_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_bcr_recid < local.high_bcr_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; deb('ckptNeeded: high_nrsp_recid='||to_char(high_nrsp_recid)|| ', local.high_nrsp_recid='||to_char(local.high_nrsp_recid)); IF (high_nrsp_recid > local.high_nrsp_recid) THEN ckp_type := RESYNC_PARTIAL; GOTO ret; ELSIF (high_nrsp_recid < local.high_nrsp_recid) THEN raise_application_error(-20035, 'Invalid high recid'); END IF; ELSE -- Same backup or standby control file seen 
      -- on site.
      IF (cksum = last_resync_cksum AND
          kccdivts = date2stamp(cf_version)) THEN
         deb('ckptNeeded - resync checksum same as last checksum');
         ckp_type := RESYNC_NONE;
      ELSE
         ckp_type := RESYNC_PARTIAL;
         last_resync_cksum := cksum;
      END IF;
      END IF;
   ELSE
      -- The controlfile is different from the one seen at the last resync.
      -- If the control file is current then the database must have been
      -- opened since the restore and a full resync is needed. Otherwise
      -- we can only do a partial resync.
      IF (cf_type = CF_CURRENT) THEN
         deb('ckptNeeded - cf_type = CF_CURRENT');
         ckp_type := RESYNC_FULL;
         setReason(RESYNC_REASON_CF);
      ELSE
         deb('ckptNeeded - cf_type != CF_CURRENT');
         IF (cksum = last_resync_cksum AND
             kccdivts = date2stamp(cf_version)) THEN
            deb('ckptNeeded - resync checksum same as last checksum');
            ckp_type := RESYNC_NONE;
         ELSE
            ckp_type := RESYNC_PARTIAL;
            last_resync_cksum := cksum;
         END IF;
      END IF;
   END IF;

<<ret>>
   -- If it looks like we need a partial resync, but this is the same
   -- controlfile as last time and the cf_seq has not advanced, then we do
   -- not need a resync.
   -- One of the circular record high water marks may still be zero because
   -- we have not been passed any records since we reset the high water
   -- mark to zero. This happens when a backup controlfile is mounted and
   -- no new circular records have been added for some record type.
   IF (ckp_type = RESYNC_PARTIAL AND
       cf_version = local.cf_create_time AND
       ckp_cf_seq = greatest(local.job_ckp_cf_seq,
                             local.full_ckp_cf_seq)) THEN
      deb('ckptNeeded - cf_seq has not advanced - do not need a resync');
      ckp_type := RESYNC_NONE;
   END IF;
   -- if resync is not needed, release the lock on dbinc and conf.
   IF (ckp_type = RESYNC_NONE) THEN
      deb('ckptNeeded - resync not needed, rollback, released all locks');
      rollback;
   END IF;
   deb('ckptNeeded - returning ckp_type='||ckp_type);
   RETURN ckp_type;
-- rollback on error to release the lock on dbinc
EXCEPTION
   WHEN OTHERS THEN
      deb('ckptNeeded - error, rollback, released all locks');
      rollback;
      RAISE;
END ckptNeeded;

PROCEDURE beginCkpt(
  ckp_scn       IN NUMBER
 ,ckp_cf_seq    IN NUMBER
 ,cf_version    IN DATE
 ,ckp_time      IN DATE
 ,ckp_type      IN VARCHAR2
 ,ckp_db_status IN VARCHAR2
 ,high_df_recid IN NUMBER
 ,cf_type       IN VARCHAR2 DEFAULT 'CURRENT' -- for compatibility reasons
 ) IS
   local       ckp%rowtype;
   node_count  NUMBER;
   db_role     node.database_role%type;
   local_dbid  NUMBER;
   local_reset_watermarks boolean := TRUE;
BEGIN
   IF (this_ckp_key IS NOT NULL) THEN
      raise_application_error(-20030, 'Resync in progress');
   END IF;
   IF (this_dbinc_key IS NULL) THEN
      raise_application_error(-20020, 'Database incarnation not set');
   END IF;
   IF (this_site_key IS NULL) THEN
      raise_application_error(-20199, 'Site key is not set');
   END IF;
   clearResyncActions;
   deb('beginCkpt - ckp_type = '||ckp_type||', cf_type ='||cf_type ||
       ', ckp_scn =' || ckp_scn);
   -- lock the db table row to allow only one checkpoint at a time (per db)
   SELECT db_id INTO local_dbid FROM db
    WHERE db_key = this_db_key FOR UPDATE;
   deb('beginCkpt - Obtained all locks for db '|| to_char(this_db_key));
   -- We do not want to process any circular record with a stamp that is
   -- less than kccdivts. These records are records in a backup controlfile
   -- that existed at the time the controlfile was made into a backup.
   -- We do not want to process them because they could pollute the
   -- recovery catalog with obsolete records for things that have been
   -- deleted since the backup controlfile was created. recover.bsq will
   -- not pass us such records, but it seems like a good idea for this
   -- package to enforce this anyway.
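   -- (For example: if the backup controlfile was created at time T, then
   -- kccdivts = date2stamp(T), and any circular record whose stamp is less
   -- than that value predates the backup and is ignored during resync.)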
   kccdivts := date2stamp(cf_version);          -- save in pkg global
   -- select the information needed to ensure that the checkpoint is valid
   SELECT ckp_scn, cf_create_time,
          decode(beginCkpt.ckp_type, 'FULL', full_ckp_cf_seq,
                 greatest(job_ckp_cf_seq, full_ckp_cf_seq)),
          dbinc_key
     INTO local.ckp_scn, local.cf_create_time, local.ckp_cf_seq,
          local.dbinc_key
     FROM node
    WHERE site_key = this_site_key;
   -- save the control file version used for resync at this site
   last_cf_version_time := local.cf_create_time;
   -- find the previous checkpoint for this dbinc
   SELECT max(ckp_key) INTO local.ckp_key FROM ckp
    WHERE dbinc_key = this_dbinc_key;
   IF (local.ckp_key IS NULL) THEN
      deb('beginCkpt - first checkpoint for this incarnation '||
          this_dbinc_key);
      local_reset_watermarks := TRUE;
   ELSIF (cf_type = 'CURRENT' OR
          (cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN
      -- If this is a full resync then make sure that the controlfile
      -- checkpoint scn is not less than the highest checkpoint scn so far.
      -- We cannot allow full resync from an old controlfile because it
      -- might logically corrupt the tablespace and datafile information.
      -- This check is skipped for a partial resync because a partial
      -- resync does not usually have a checkpoint scn, and a partial
      -- resync from an old controlfile is harmless.
      IF (ckp_type = 'FULL' AND this_dbinc_key = local.dbinc_key) THEN
         IF (ckp_scn < local.ckp_scn) THEN
            deb('beginCkpt - cf scn='||ckp_scn||',catalog cf scn='||
                local.ckp_scn);
            raise_application_error(-20032, 'Invalid checkpoint SCN');
         ELSIF (ckp_scn = local.ckp_scn AND
                ckp_cf_seq < local.ckp_cf_seq) THEN
            deb('beginCkpt - cf seq='||ckp_cf_seq||',catalog cf seq='||
                local.ckp_cf_seq);
            raise_application_error(-20033, 'Invalid checkpoint cf seq#');
         ELSIF (ckp_scn = local.ckp_scn AND
                ckp_cf_seq = local.ckp_cf_seq) THEN
            raise_application_error(-20034, 'Resync not needed');
         END IF;
      END IF;
      IF (cf_version = local.cf_create_time) THEN
         deb('beginCkpt - Resync from same last control file');
         -- Since the cf_version (kccdivts) is the same as last time, this
         -- controlfile is the same controlfile from which we last
         -- resynced, so ckp_cf_seq must advance. If ckp_cf_seqs are the
         -- same then the controlfile has not changed since the previous
         -- resync, so resync is not needed.
         IF (ckp_cf_seq < local.ckp_cf_seq AND
             this_dbinc_key = local.dbinc_key) THEN
            deb('beginCkpt - cf seq='||ckp_cf_seq||',catalog cf seq='||
                local.ckp_cf_seq);
            raise_application_error(-20033, 'Invalid checkpoint cf seq#');
         ELSIF (ckp_cf_seq = local.ckp_cf_seq AND
                this_dbinc_key = local.dbinc_key) THEN
            raise_application_error(-20034, 'Resync not needed');
         END IF;
         local_reset_watermarks := FALSE;
      ELSE
         deb('beginCkpt - Resync from different control file');
         local_reset_watermarks := TRUE;
      END IF;
   ELSE
      -- if mount status is BACKUP, then this is an explicit resync from
      -- a control file copy or a backup controlfile.
      IF (ckp_db_status = 'BACKUP') THEN
         deb('beginCkpt - Resync from control file copy');
         local_reset_watermarks := TRUE;
      ELSE
         -- if the control file version is the same as seen before, then
         -- this must be the same controlfile that we previously resynced.
         IF (kccdivts = sessionWaterMarks.last_kccdivts) THEN
            deb('beginCkpt - Resync from same backup control file');
            local_reset_watermarks := FALSE;
         ELSE
            deb('beginCkpt - Resync from different backup control file');
            local_reset_watermarks := TRUE;
         END IF;
      END IF;
   END IF;
   IF (local_reset_watermarks) THEN
      deb('beginCkpt - init session watermarks');
      sessionWaterMarks := init_sessionWaterMarks;
      sessionWaterMarks.last_kccdivts := kccdivts;
   END IF;
   -- For now, the water marks are tracked only for the current control
   -- file and for a standby control file (if db_unique_name is not null).
   IF (cf_type = 'CURRENT' OR
       (cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN
      IF NOT local_reset_watermarks THEN
         deb('beginCkpt - update ckp_scn and use existing water marks');
         -- update the ckp_scn and ckp_cf_seqs for the next resync.
         UPDATE node SET
            ckp_scn = greatest(ckp_scn,
                        decode(beginCkpt.ckp_type, 'FULL',
                               beginCkpt.ckp_scn, 0)),
            full_ckp_cf_seq = greatest(full_ckp_cf_seq,
                        decode(beginCkpt.ckp_type, 'FULL',
                               beginCkpt.ckp_cf_seq, 0)),
            job_ckp_cf_seq = greatest(job_ckp_cf_seq,
                        decode(beginCkpt.ckp_type, 'PARTIAL',
                               beginCkpt.ckp_cf_seq, 0)),
            bcr_in_use = nvl2(high_bcr_recid, 'YES', 'NO')
          WHERE site_key = this_site_key;
      ELSE
         -- This has to be one of the following cases:
         -- 1) It might be the first checkpoint for this incarnation.
         -- 2) The control file is different from the last one seen at
         --    this node.
         -- 3) The incarnation at which this site did its last resync is
         --    different from the current incarnation.
         -- 4) The control file may have been re-created in the same
         --    incarnation since the last resync.
         -- In all these cases, the water marks in the node table are not
         -- correct, and hence we reset them.
         --
         -- bug 5932029: also set high_ic_recid = 0
         deb('beginCkpt - update ckp_scn and reset water marks, '||
             'this_site_key '|| this_site_key);
         UPDATE node SET
            cf_create_time = beginCkpt.cf_version,
            dbinc_key = this_dbinc_key,
            ckp_scn = decode(beginCkpt.ckp_type, 'FULL',
                             beginCkpt.ckp_scn, 0),
            full_ckp_cf_seq = decode(beginCkpt.ckp_type, 'FULL',
                                     beginCkpt.ckp_cf_seq, 0),
            job_ckp_cf_seq = decode(beginCkpt.ckp_type, 'PARTIAL',
                                    beginCkpt.ckp_cf_seq, 0),
            high_ic_recid = 0, high_ts_recid = NULL, high_df_recid = NULL,
            high_rt_recid = NULL, high_orl_recid = NULL,
            high_offr_recid = 0, high_rlh_recid = 0, high_al_recid = 0,
            high_bs_recid = 0, high_bp_recid = 0, high_bdf_recid = 0,
            high_cdf_recid = 0, high_brl_recid = 0, high_bcb_recid = 0,
            high_ccb_recid = 0, high_do_recid = 0, high_pc_recid = 0,
            high_bsf_recid = 0, high_rsr_recid = 0, high_tf_recid = 0,
            high_grsp_recid = 0, high_nrsp_recid = 0, high_bcr_recid = 0,
            bcr_in_use = nvl2(high_bcr_recid, 'YES', 'NO')
          WHERE site_key = this_site_key;
      END IF;
   ELSE
      -- remember the control file version and ckp_scn at this node
      UPDATE node SET
         cf_create_time = beginCkpt.cf_version,
         dbinc_key = this_dbinc_key,
         ckp_scn = decode(beginCkpt.ckp_type, 'FULL',
                          beginCkpt.ckp_scn, 0),
         full_ckp_cf_seq = decode(beginCkpt.ckp_type, 'FULL',
                                  beginCkpt.ckp_cf_seq, 0),
         job_ckp_cf_seq = decode(beginCkpt.ckp_type, 'PARTIAL',
                                 beginCkpt.ckp_cf_seq, 0),
         bcr_in_use = nvl2(high_bcr_recid, 'YES', 'NO')
       WHERE site_key = this_site_key;
   END IF;
   -- record resyncs
   BEGIN
      INSERT INTO ckp (ckp_key, ckp_scn, ckp_cf_seq, cf_create_time,
                       ckp_time, dbinc_key, ckp_type, ckp_db_status,
                       resync_time, site_key)
      VALUES (rman_seq.nextval, ckp_scn, ckp_cf_seq, cf_version, ckp_time,
              this_dbinc_key, beginCkpt.ckp_type, ckp_db_status, sysdate,
              this_site_key);
      -- set package state variables
      SELECT rman_seq.currval INTO this_ckp_key FROM dual;
   EXCEPTION
      WHEN dup_val_on_index
THEN IF (cf_type = 'CURRENT' OR (cf_type = 'STANDBY' AND this_db_unique_name IS NOT NULL)) THEN RAISE; ELSE -- this is a non-current controlfile resync. Because we do not track -- the high water marks for this type of controlfile, ckptNeeded -- always returns PARTIAL resync for non-current controlfile. -- So, we optimize here that resync is not needed if no controlfile -- txn is done. But, if some controlfile txn is done, then ALL -- circular records (including duplicate) are resynced. raise_application_error(-20034, 'Resync not needed'); END IF; END; SELECT count(*) INTO node_count FROM node WHERE node.db_key = this_db_key AND node.db_unique_name = this_db_unique_name; IF (node_count = 0 AND this_db_unique_name IS NOT NULL) THEN IF (cf_type = 'STANDBY') THEN db_role := 'STANDBY'; ELSE db_role := 'PRIMARY'; END IF; deb('beginCkpt - adding node row with force_resync2cf=NO'); INSERT INTO node(db_unique_name, db_key, high_conf_recid, force_resync2cf, database_role, site_key) VALUES(this_db_unique_name, this_db_key, 0, 'NO', db_role, rman_seq.nextval); END IF; IF cf_type = 'STANDBY' THEN SELECT max(ckp_scn) INTO last_full_ckp_scn FROM ckp WHERE ckp_type = 'FULL' AND dbinc_key = this_dbinc_key; END IF; this_ckp_scn := ckp_scn; this_ckp_time := ckp_time; this_cf_type := cf_type; -- rollback on error to release the lock on dbinc EXCEPTION WHEN OTHERS THEN deb('beginCkpt - error, rollback, released all locks'); rollback; RAISE; END beginCkpt; PROCEDURE endCkpt IS BEGIN checkResync; IF (tsRec.ts# IS NOT NULL) THEN raise_application_error(-20041, 'Tablespace resync not completed'); END IF; IF (dfRec.file# IS NOT NULL) THEN raise_application_error(-20051, 'Datafile resync not completed'); END IF; deb('endCkpt - commit, release locks'); commit; -- commit all of our changes prev_sessionWaterMarks := sessionWaterMarks; /* Do not run cleanupTempResource when connected to a virtual catalog, * because we do not allow merging catalogs to be done by a virtual * catalog user. 
    */
   if user = dbms_catowner then
      cleanupTempResource;
   end if;
   this_ckp_key := NULL;              -- and update state variable
   this_ckp_scn := NULL;
   this_ckp_time := NULL;
   this_cf_type := NULL;
   last_cf_version_time := NULL;
END endCkpt;

PROCEDURE cancelCkpt IS
BEGIN
   deb('cancelCkpt - rollback, released all locks');
   rollback;
   sessionWaterMarks := prev_sessionWaterMarks;
   IF (this_ckp_key IS NOT NULL) THEN
      -- rollback and reset state variables
      this_ckp_key := NULL;
      this_ckp_scn := NULL;
      this_ckp_time := NULL;
   END IF;
   IF tsQ%ISOPEN THEN
      CLOSE tsQ;
   END IF;
   IF dfQ%ISOPEN THEN
      CLOSE dfQ;
   END IF;
   IF tfQ%ISOPEN THEN
      CLOSE tfQ;
   END IF;
   IF rtQ%ISOPEN THEN
      CLOSE rtQ;
   END IF;
   IF orlQ%ISOPEN THEN
      CLOSE orlQ;
   END IF;
   IF grspQ%ISOPEN THEN
      CLOSE grspQ;
   END IF;
   IF bpq%ISOPEN THEN
      CLOSE bpq;
   END IF;
   -- resync was not successful, hence the next resync shouldn't be
   -- prevented
   last_resync_cksum := NULL;
END cancelCkpt;

/*-------------------*
 * Tablespace Resync *
 *-------------------*/

PROCEDURE fetchTs IS                  -- this is private to the pkg body
BEGIN
   FETCH tsQ INTO tsRec;              -- get next row
   IF tsQ%NOTFOUND THEN
      tsRec.ts# := MAXNUMVAL;         -- indicate end of fetch
      CLOSE tsQ;
   ELSE
      deb('fetchTs - '||tsRec.ts_name||' ('||to_char(tsRec.ts#)||') '||
          to_char(tsRec.create_scn) ||
          ';plugin_scn='||to_char(tsRec.plugin_scn));
   END IF;
END fetchTs;

PROCEDURE addTs(
  ts_name     IN VARCHAR2
 ,ts#         IN NUMBER
 ,create_scn  IN NUMBER
 ,create_time IN DATE
 ,rbs_count   IN NUMBER
 ,included_in_database_backup IN VARCHAR2
 ,bigfile     IN VARCHAR2
 ,temporary   IN VARCHAR2
 ,encrypt_in_backup IN VARCHAR2
 ,plugin_scn  IN NUMBER
 ) IS
BEGIN
   deb('addTs - tablespace '||ts_name||' ('||to_char(ts#)||') '||
       to_char(create_scn) || ',plugin_scn=' || to_char(plugin_scn));
   INSERT INTO ts (dbinc_key, ts#, ts_name, create_scn, create_time,
                   included_in_database_backup, bigfile, temporary,
                   encrypt_in_backup, plugin_scn)
   VALUES (this_dbinc_key, ts#, ts_name, create_scn, create_time,
           included_in_database_backup, bigfile, temporary,
           encrypt_in_backup, plugin_scn);
   INSERT INTO tsatt (dbinc_key, ts#, create_scn, start_ckp_key,
                      rbs_count, plugin_scn)
   VALUES (this_dbinc_key, ts#, create_scn, this_ckp_key, rbs_count,
           plugin_scn);
END addTs;

PROCEDURE dropTs(                     -- private to package body
  ts#        IN NUMBER
 ,create_scn IN NUMBER
 ,drop_scn   IN NUMBER
 ,drop_time  IN DATE
 ,plugin_scn IN NUMBER
 ) IS
BEGIN
   deb('dropTs - tablespace '||to_char(ts#)||' - '||to_char(create_scn) ||
       ',plugin_scn - ' || plugin_scn);
   UPDATE ts SET drop_scn = dropTs.drop_scn, drop_time = dropTs.drop_time
    WHERE ts.dbinc_key = this_dbinc_key
      AND ts.ts# = dropTs.ts#
      AND ts.create_scn = dropTs.create_scn
      AND ts.plugin_scn = dropTs.plugin_scn;
   deb('dropTs - returning');
END dropTs;

PROCEDURE renameTs(
  ts_name    IN VARCHAR2
 ,dbinc_key  IN NUMBER
 ,ts#        IN NUMBER
 ,create_scn IN NUMBER
 ,plugin_scn IN NUMBER
 ) IS
BEGIN
   UPDATE ts SET ts.ts_name = renameTs.ts_name
    WHERE ts.dbinc_key = renameTs.dbinc_key
      AND ts.ts# = renameTs.ts#
      AND ts.create_scn = renameTs.create_scn
      AND ts.plugin_scn = renameTs.plugin_scn;
END renameTs;

FUNCTION beginTableSpaceResync(
  high_ts_recid IN NUMBER,
  force         IN BOOLEAN DEFAULT FALSE) RETURN BOOLEAN IS
BEGIN
   checkResync;
   -- if force is TRUE, we want to resync even if high_ts_recid has not
   -- advanced beyond the recid recorded in the catalog.
   -- if the high_ts_recid in the controlfile is equal to the high_ts_recid
   -- stored in the rcvcat then the tablespace information in the
   -- controlfile has not changed since the previous resync, so there is
   -- no reason to resync it.
   -- If the high_ts_recid has been incremented since the previous resync
   -- then the tablespace information needs to be resynced again.
   SELECT high_ts_recid INTO last_ts_recid FROM node
    WHERE site_key = this_site_key;
   IF (high_ts_recid = last_ts_recid AND NOT force) THEN
      deb('beginTableSpaceResync - Resync of tablespaces not needed');
      RETURN FALSE;
   ELSIF (high_ts_recid > last_ts_recid OR
          last_ts_recid IS NULL OR
          high_ts_recid IS NULL OR
          force) THEN
      deb('beginTableSpaceResync - Catalog ts_recid: '||last_ts_recid);
      last_ts_recid := high_ts_recid;
      OPEN tsQ;                       -- just open that cursor please
      fetchTs;                        -- do priming read
      last_ts# := -1;                 -- initialize for ordering assert
      if resync_reason = RESYNC_REASON_TS then
         fullResyncAction.active := TRUE;
         fullResyncAction.valid := TRUE;
         fullResyncAction.objtype := RESYNC_OBJECT_TABLESPACE;
      else
         fullResyncAction.active := FALSE;
      end if;
      RETURN TRUE;
   ELSE
      raise_application_error(-20035, 'Invalid high recid');
   END IF;
END beginTableSpaceResync;

PROCEDURE checkTableSpace(
  ts_name     IN VARCHAR2
 ,ts#         IN NUMBER
 ,create_scn  IN NUMBER
 ,create_time IN DATE
 ,rbs_count   IN NUMBER   DEFAULT NULL
 ,included_in_database_backup IN VARCHAR2 DEFAULT NULL
 ,bigfile     IN VARCHAR2 DEFAULT NULL
 ,temporary   IN VARCHAR2 DEFAULT NULL
 ,encrypt_in_backup IN VARCHAR2 DEFAULT NULL
 ,plugin_scn  IN NUMBER   DEFAULT 0
 ) IS
   -- Bug 1478785.
   -- rman versions 8.0.5 and earlier do not accept a default value given
   -- as a string. To maintain compatibility, pass NULL as the default
   -- value.
   idb varchar2(3) := nvl(included_in_database_backup, 'YES');
                                      -- actual default value
   bf  varchar2(3) := nvl(bigfile, 'NO');       -- actual default value
   tmp varchar2(3) := nvl(temporary, 'NO');     -- actual default value
   ts_changed boolean := FALSE;
BEGIN
   IF (tsRec.ts# IS NULL) THEN   -- assert beginTableSpaceResync was called
      raise_application_error(-20040, 'Tablespace resync not started');
   END IF;
   IF (last_ts# >= ts#) THEN          -- assert rows passed in ascending
      raise_application_error(-20036, 'Invalid record order');
   END IF;
   -- If the temporary argument is NOT NULL, then the client is aware of
   -- resyncing temporary tablespaces. Otherwise, it is not. In other
   -- words, this is an indication of whether the client is >= 10gR2,
   -- because we introduced temporary tablespace resync in 10gR2.
   IF (temporary IS NOT NULL) THEN
      do_temp_ts_resync := TRUE;
   END IF;
   last_ts# := ts#;                   -- for checking next time
   -- all tablespaces that exist at a checkpoint must have
   -- create_scn <= ckp_scn. Assert this since the correctness of the
   -- rc_ckp_tablespace view depends on this assumption.
   IF (create_scn > this_ckp_scn) THEN
      raise_application_error(-20042, 'Invalid tablespace create SCN');
   END IF;
   -- If the current tablespace in tsRec has a lower ts# than the
   -- tablespace we are currently checking, then it must have been dropped.
   -- Note multiple such tablespaces may exist in the recovery catalog, so
   -- drop all such.
   -- If temp tablespaces are not resynced (the client is < 10gR2), then we
   -- cannot mark them dropped until a 10gR2 or later rman client is used.
-- deb('checkTableSpace - ts#: ' || ts# || ' tsRec.ts#: ' || tsRec.ts#); WHILE (ts# > tsRec.ts#) LOOP IF (tsRec.temporary = 'NO' OR -- is a permanent tablespace do_temp_ts_resync) THEN -- is a 10gR2 or later rman client deb('checkTableSpace - before calling dropTS'); dropTs(tsRec.ts#, tsRec.create_scn, this_ckp_scn, this_ckp_time, tsRec.plugin_scn); deb('checkTableSpace - before calling incResyncActions'); begin incResyncActions(RESYNC_ACTION_DROP, tsRec.ts#, tsRec.ts_name); exception when others then deb('checkTableSpace - caught exception ' || substr(sqlerrm, 1, 132)); end; deb('checkTableSpace - after calling incResyncActions'); END IF; deb('checkTableSpace - before calling fetchTS'); fetchTs; deb('checkTableSpace - after calling fetchTS'); END LOOP; deb('checkTableSpace - out of loop, ts#: ' || ts# || ' tsRec.ts#: ' || tsRec.ts#); IF (ts# < tsRec.ts#) THEN -- this tablespace is new and must be inserted into rcvcat addTs(ts_name, ts#, create_scn, create_time, rbs_count, idb, bf, tmp, encrypt_in_backup, plugin_scn); incResyncActions(RESYNC_ACTION_ADD, ts#, ts_name); ELSE -- (ts# = tsRec.ts#) IF (create_scn = tsRec.create_scn) THEN -- this is an existing tablespace which is already recorded in rcvcat -- check that create_time matches IF (create_time <> tsRec.create_time) THEN raise_application_error(-20043, 'Invalid tablespace create time'); END IF; IF (plugin_scn > 0) THEN IF (tsRec.plugin_scn <> 0 AND tsRec.plugin_scn < checkTableSpace.plugin_scn) THEN deb('checkTableSpace - plugin read only tbs dropped and replugged'); -- If the plugin scn of the current tablespace is less than the input -- plugin scn then the user may have dropped the old tablespace and -- plugged in a new one. dropTs(tsRec.ts#, tsRec.create_scn, this_ckp_scn, this_ckp_time, tsRec.plugin_scn); incResyncActions(RESYNC_ACTION_DROP, tsRec.ts#, tsRec.ts_name); addTs(ts_name, ts#, create_scn, create_time, rbs_count, idb, bf, tmp, encrypt_in_backup, plugin_scn); incResyncActions(RESYNC_ACTION_ADD, ts#, ts_name); goto next_Ts; ELSIF (tsRec.plugin_scn > checkTableSpace.plugin_scn) THEN -- plugin_scn cannot decrease raise_application_error(-20055, 'Invalid tablespace plugin SCN'); ELSE -- the plugin_scn of the tablespaces is the same, proceed with the -- remaining updates below. deb('checkTableSpace - known plugged in tablespace'); END IF; END IF; -- if ts# and create_time match, but ts_name does not match, assume the -- tablespace was renamed IF (ts_name <> tsRec.ts_name) THEN renameTs(ts_name, this_dbinc_key, tsRec.ts#, tsRec.create_scn, tsRec.plugin_scn); incResyncActions(RESYNC_ACTION_RENAME, tsRec.ts#, ts_name); END IF; -- if the included_in_database_backup field has changed then update the ts -- table; note that the initial value may be null, hence the nvl function IF (idb <> nvl(tsRec.included_in_database_backup,'XX')) THEN UPDATE ts SET ts.included_in_database_backup = checkTableSpace.included_in_database_backup WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = tsRec.ts# AND ts.create_scn = tsRec.create_scn AND ts.plugin_scn = tsRec.plugin_scn; ts_changed := TRUE; END IF; -- if the encrypt_in_backup field has changed then update the ts table -- note that the initial value or the new value may be null.
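-- (The three-branch test below is a null-safe inequality: the values
-- differ when exactly one of them is NULL, or when both are non-NULL and
-- unequal. A plain <> by itself would evaluate to NULL, and thus not
-- TRUE, whenever either side is NULL, and the change would be missed.)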
IF (tsRec.encrypt_in_backup is null and encrypt_in_backup is not null OR tsRec.encrypt_in_backup is not null and encrypt_in_backup is null OR tsRec.encrypt_in_backup <> encrypt_in_backup) THEN UPDATE ts SET ts.encrypt_in_backup = checkTableSpace.encrypt_in_backup WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = tsRec.ts# AND ts.create_scn = tsRec.create_scn AND ts.plugin_scn = tsRec.plugin_scn; ts_changed := TRUE; END IF; -- if the rbs_count field has changed to a not null value, update tsatt -- note that the initial value may be null, hence the nvl function IF (rbs_count <> nvl(tsRec.rbs_count,-1)) THEN UPDATE tsatt SET end_ckp_key = this_ckp_key WHERE tsatt.dbinc_key = this_dbinc_key AND tsatt.ts# = tsRec.ts# AND tsatt.create_scn = tsRec.create_scn AND tsatt.plugin_scn = tsRec.plugin_scn AND tsatt.end_ckp_key IS NULL; INSERT INTO tsatt(dbinc_key, ts#, create_scn, start_ckp_key, rbs_count, plugin_scn) VALUES(this_dbinc_key, tsRec.ts#, tsRec.create_scn, this_ckp_key, rbs_count, tsRec.plugin_scn); ts_changed := TRUE; END IF; if ts_changed then incResyncActions(RESYNC_ACTION_CHANGE, tsRec.ts#, tsRec.ts_name); end if; ELSIF (create_scn = 0 AND tmp = 'YES') THEN -- this must be a pre-10gR2 tempfile where create_scn was not set. -- We can only track one tempfile with create_scn as 0 in the current -- incarnation. dropTs(tsRec.ts#, tsRec.create_scn, create_scn, create_time, tsRec.plugin_scn); DELETE FROM ts WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = checkTableSpace.ts# AND ts.create_scn = 0 AND ts.plugin_scn = 0; addTs(ts_name, ts#, create_scn, create_time, rbs_count, idb, bf, tmp, encrypt_in_backup, plugin_scn); incResyncActions(RESYNC_ACTION_CHANGE, tsRec.ts#, ts_name); ELSE IF (tmp = 'YES') THEN -- bug# 7215002. If a temporary ts# reuses a permanent ts#, then mark -- it dropped before adding the temp ts#. IF (tsRec.temporary = 'NO') THEN dropTs(tsRec.ts#, tsRec.create_scn, create_scn, create_time, tsRec.plugin_scn); END IF; -- the temporary tablespace create_scn is derived from the lowest -- tempfile create scn. Tempfiles on the primary and standby can have -- different create SCNs and hence a different ts_create_scn. So, -- delete any duplicate ts entry before adding this new ts. -- See bug# 5934290 for details DELETE FROM ts WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = checkTablespace.ts# AND ts.temporary = 'YES'; deb('Deleting tablespace entry for ts#=' || ts# || ', ts_name=' || ts_name); addTs(ts_name, ts#, create_scn, create_time, rbs_count, idb, bf, tmp, encrypt_in_backup, plugin_scn); deb('Added tablespace entry for ts#=' || ts# || ', ts_name=' || ts_name); incResyncActions(RESYNC_ACTION_RECREATE, ts#, ts_name); ELSE IF (create_scn > tsRec.create_scn) THEN -- this tablespace has been recreated, mark the old one dropped -- and insert the new one into the recovery catalog -- -- ###If the first tempfile is dropped, then the create_scn of the -- tablespace advances. So, the query thinks that the tablespace was -- dropped and added. Some extra work, but I do not think it could -- cause any problems. dropTs(tsRec.ts#, tsRec.create_scn, create_scn, create_time, tsRec.plugin_scn); addTs(ts_name, ts#, create_scn, create_time, rbs_count, idb, bf, tmp, encrypt_in_backup, plugin_scn); incResyncActions(RESYNC_ACTION_RECREATE, tsRec.ts#, ts_name); ELSE -- (create_scn < tsRec.create_scn) -- The client is passing us a tablespace with a lower creation -- SCN than the one we currently have listed in the rcvcat for -- this ts#. This is a big NO NO. Probably the target database -- has an old controlfile mounted. Signal an error.
raise_application_error(-20042, 'Invalid tablespace creation change#'); END IF; END IF; END IF; <<next_Ts>> fetchTS; -- get next row from TS cursor END IF; -- (ts# < tsRec.ts#) END checkTableSpace; PROCEDURE endTableSpaceResync IS BEGIN checkResync; -- If temp tablespaces are not resynced (i.e. the client is < 10gR2), then -- we cannot mark them dropped until a 10gR2 or later rman client is used. -- deb('endTableSpaceResync - tsRec.ts#: ' || tsRec.ts#); begin WHILE (tsRec.ts# < MAXNUMVAL) LOOP -- while extra tablespaces in rcvcat IF (tsRec.temporary = 'NO' OR -- is a permanent tablespace do_temp_ts_resync) THEN -- is a 10gR2 or later rman client deb('endTableSpaceResync - before calling dropTS'); dropTs(tsRec.ts#, tsRec.create_scn, this_ckp_scn, this_ckp_time, tsRec.plugin_scn); deb('endTableSpaceResync - before calling incResyncActions'); begin incResyncActions(RESYNC_ACTION_DROP, tsRec.ts#, tsRec.ts_name); exception when others then deb('endTableSpaceResync - caught exception ' || substr(sqlerrm, 1, 132)); end; deb('endTableSpaceResync - after calling incResyncActions'); END IF; deb('endTableSpaceResync - before calling fetchTS'); fetchTs; deb('endTableSpaceResync - after calling fetchTS'); END LOOP; exception when others then deb('endTableSpaceResync - caught exception ' || substr(sqlerrm, 1, 132)); end; deb('endTableSpaceResync - out of loop, tsRec.ts#: ' || tsRec.ts#); -- set the state variable to indicate that tablespace resync is done tsRec.ts# := NULL; -- update high_ts_recid for the next resync UPDATE node SET high_ts_recid = nvl(last_ts_recid, high_ts_recid) WHERE site_key = this_site_key; last_ts_recid := NULL; -- reset high_tf_recid so that the first resync with a 10gR2 rman client -- will trigger a temporary tablespace resync. IF (NOT do_temp_ts_resync) THEN UPDATE node SET high_tf_recid = 0 WHERE site_key = this_site_key; END IF; END endTableSpaceResync; /*-----------------* * Datafile Resync * *-----------------*/ PROCEDURE fetchDF IS -- private to package body BEGIN FETCH dfQ INTO dfRec; IF dfQ%NOTFOUND THEN dfRec.file# := MAXNUMVAL; -- indicate end-of-fetch CLOSE dfQ; END IF; END fetchDF; PROCEDURE addDF(file# IN NUMBER, -- private to package body fname IN VARCHAR2, create_time IN DATE, create_scn IN NUMBER, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, stop_scn IN NUMBER, stop_time IN DATE, read_only IN number, rfile# IN NUMBER, foreign_dbid IN number, foreign_create_scn IN number, foreign_create_time IN date, plugged_readonly IN varchar2, plugin_scn IN number, plugin_reset_scn IN number, plugin_reset_time IN date, create_thread IN number, create_size IN number) IS ts_create_scn NUMBER; ts_plugin_scn NUMBER; ts_name ts.ts_name%type; local_df_key NUMBER; child_rec exception; pragma exception_init(child_rec, -2292); BEGIN SELECT create_scn, plugin_scn, ts_name INTO ts_create_scn, ts_plugin_scn, ts_name FROM ts WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = addDF.ts# AND ts.drop_scn IS NULL; -- in case ts numbers are reused -- If the RMAN client has not passed the plugin scn for the tablespace, it -- will be 0.
Update its value now to the correct plugin_scn IF ts_plugin_scn = 0 AND plugin_scn > 0 THEN deb('addDf- fixing plugin SCN for tablespace ' || ts_name || '(' || ts# || ') to ' || plugin_scn); BEGIN UPDATE ts set plugin_scn = addDf.plugin_scn WHERE dbinc_key = this_dbinc_key AND ts# = addDF.ts# AND create_scn = ts_create_scn AND plugin_scn = 0; UPDATE tsatt set plugin_scn = addDf.plugin_scn WHERE dbinc_key = this_dbinc_key AND ts# = addDF.ts# AND create_scn = ts_create_scn AND plugin_scn = 0; EXCEPTION WHEN child_rec THEN DELETE DF WHERE dbinc_key = this_dbinc_key AND ts# = addDF.ts# AND ts_create_scn = addDf.ts_create_scn AND plugin_scn = 0; deb('addDf- found some datafiles that belonged to the same tablespace,'); deb(' but are not marked as plugged in; deleted rows ' || sql%ROWCOUNT); UPDATE ts set plugin_scn = addDf.plugin_scn WHERE dbinc_key = this_dbinc_key AND ts# = addDF.ts# AND create_scn = ts_create_scn AND plugin_scn = 0; UPDATE tsatt set plugin_scn = addDf.plugin_scn WHERE dbinc_key = this_dbinc_key AND ts# = addDF.ts# AND create_scn = ts_create_scn AND plugin_scn = 0; END; ts_plugin_scn := addDf.plugin_scn ; END IF; -- If the data file is already known to the catalog, get its df_key; -- otherwise, assign a new df_key. -- There may be multiple rows in DFATT with the same DF_KEY. DF_KEY -- together with DBINC_KEY forms the unique key. Hence we need to use -- distinct in the query below. BEGIN select distinct df_key into local_df_key from df, dbinc where file# = addDF.file# and create_scn = addDF.create_scn and plugin_scn = addDF.plugin_scn and foreign_dbid = addDF.foreign_dbid and ts# = addDF.ts# and df.dbinc_key = dbinc.dbinc_key and dbinc.db_key = this_db_key; EXCEPTION WHEN no_data_found THEN select rman_seq.nextval into local_df_key from dual; END; -- -- Bug 1332121: Insert 0 for blocks rather than NULL for new datafile -- records, as the 'list' command in 8.1.5- RMAN will fail with ORA-1405. -- INSERT INTO df(dbinc_key, file#, create_scn, create_time, ts#, ts_create_scn, block_size, stop_scn, stop_time, read_only, rfile#, df_key, blocks, foreign_dbid, foreign_create_scn, foreign_create_time, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, create_thread, create_size) VALUES(this_dbinc_key, file#, create_scn, create_time, ts#, ts_create_scn, block_size, stop_scn, stop_time, read_only, rfile#, local_df_key, nvl(blocks, 0), foreign_dbid, foreign_create_scn, foreign_create_time, plugged_readonly, ts_plugin_scn, plugin_reset_scn, plugin_reset_time, create_thread, create_size); -- Note that we have only one row per df_key in site_dfatt as we track only -- the latest changes at a site. So, the row, if it already exists, should -- reflect any changes. BEGIN INSERT INTO site_dfatt(df_key, fname, site_key) VALUES(local_df_key, fname, this_site_key); EXCEPTION WHEN dup_val_on_index THEN -- Fix the file name if it has changed.
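-- (INSERT first and fall back to UPDATE on dup_val_on_index: a simple
-- upsert. site_dfatt keeps a single row per df_key for each site, so a
-- duplicate key just means the file name must be refreshed in place.)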
UPDATE site_dfatt SET fname = addDf.fname WHERE site_key = this_site_key AND df_key = local_df_key; END; END addDf; PROCEDURE setDatafileSize(file# IN number ,create_scn IN number ,blocks IN number ,plugin_scn IN number default 0) IS BEGIN IF (this_dbinc_key is NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; update df set df.blocks = setDatafileSize.blocks where dbinc_key = this_dbinc_key and df.file# = setDatafileSize.file# and df.create_scn = setDatafileSize.create_scn and df.plugin_scn = setDatafileSize.plugin_scn; deb('setDatafileSize - commit, release locks'); commit; END setDatafileSize; PROCEDURE dropDf( -- private to package body file# IN NUMBER ,create_scn IN NUMBER ,plugin_scn IN NUMBER ,drop_scn IN NUMBER ,drop_time IN DATE ) IS BEGIN -- adjust the drop_scn and drop_time of the tablespace. We can do this -- because datafiles are always dropped with their tablespaces. -- update ts set -- drop_scn = least(drop_scn,dropDf.drop_scn), -- drop_time = least(drop_time,dropDf.drop_time) -- where ts_key = -- (select ts_key from df -- where df_key = dropDf.df_key); UPDATE df SET drop_scn = dropDf.drop_scn, drop_time = dropDf.drop_time WHERE df.dbinc_key = this_dbinc_key AND df.file# = dropDf.file# AND df.create_scn = dropDf.create_scn AND df.plugin_scn = dropDf.plugin_scn; END dropDf; FUNCTION beginDataFileResyncForStandby( high_df_recid IN number ) return boolean IS BEGIN checkResync; SELECT high_df_recid INTO last_df_recid FROM node WHERE node.site_key = this_site_key; deb('high_df_recid='||high_df_recid||',last_df_recid='||last_df_recid); IF last_full_ckp_scn IS NULL THEN deb('beginDataFileResyncForStandby - no full resync'); raise_application_error(-20079, 'full resync from primary database is not done'); END IF; -- resync to the catalog only when the previously stored water mark is -- different or this is the first time the node is seen. Otherwise the file -- names in the catalog take precedence over the control file. IF (high_df_recid > last_df_recid OR last_df_recid IS NULL) THEN last_df_recid := high_df_recid; last_file# := -1; -- initialize for ordering assert -- If the standby is ahead of the last full resync, open the dfQ cursor to -- detect whether a full resync is required. IF this_ckp_scn > last_full_ckp_scn THEN OPEN dfQ; fetchDf; -- do priming read END IF; RETURN TRUE; END IF; deb('no need to resync datafile names for '||this_db_unique_name|| ' standby site'); RETURN FALSE; END; PROCEDURE checkDataFileForStandby(file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, stop_scn IN NUMBER, read_only IN NUMBER, foreign_dbid IN NUMBER, plugin_scn IN NUMBER) IS local_df_key NUMBER; BEGIN IF (last_file# >= file#) THEN -- assert rows passed in ascending raise_application_error(-20036, 'Invalid record order'); END IF; last_file# := file#; -- for checking next call -- If the standby is ahead of the last time we did a full resync, then -- signal the 'need full resync' error as needed. IF this_ckp_scn > last_full_ckp_scn THEN IF (file# != dfRec.file#) THEN IF (file# > dfRec.file#) THEN deb('checkDataFileResyncForStandby - dropped file#=' ||dfRec.file#); ELSE -- note that dfRec.file# = MAXNUMVAL is also handled here...
deb('checkDataFileResyncForStandby - added file#=' || file#); END IF; raise_application_error(-20079, 'full resync from primary database is not done'); ELSE IF (stop_scn <> dfRec.stop_scn OR stop_scn is null and dfRec.stop_scn is not null OR stop_scn is not null and dfRec.stop_scn is null OR read_only < dfRec.read_only OR read_only is null and dfRec.read_only is not null OR read_only is not null and dfRec.read_only is null) THEN deb('checkDataFileResyncForStandby - change for file#=' || file#); raise_application_error(-20079, 'full resync from primary database is not done'); END IF; END IF; fetchDF; END IF; BEGIN -- must find the df_key select distinct df_key into local_df_key from df, dbinc where file# = checkDataFileForStandby.file# and create_scn = checkDataFileForStandby.create_scn and plugin_scn = checkDataFileForStandby.plugin_scn and decode(foreign_dbid, 0, checkDataFileForStandby.foreign_dbid, foreign_dbid) = checkDataFileForStandby.foreign_dbid and ts# = checkDataFileForStandby.ts# and df.dbinc_key = dbinc.dbinc_key and dbinc.db_key = this_db_key; EXCEPTION -- something is wrong!!! how can entries from the df table disappear when -- there are rows in site_dfatt for a specific datafile? WHEN no_data_found THEN raise_application_error(-20999, 'Internal error in checkDataFileForStandby - 1 '); -- assert there are no multiple df_key entries for the same file WHEN too_many_rows THEN raise_application_error(-20999, 'Internal error in checkDataFileForStandby - 2 '); END; -- Note that we have only 1 row per df_key in site_dfatt as we track only -- the latest changes at a site. So, the row, if it exists, should reflect -- any changes. BEGIN INSERT INTO site_dfatt(df_key, fname, site_key) VALUES(local_df_key, checkDataFileForStandby.fname, this_site_key); EXCEPTION WHEN dup_val_on_index THEN -- Fix the file name if it has changed.
UPDATE site_dfatt SET fname = checkDataFileForStandby.fname WHERE site_key = this_site_key AND df_key = local_df_key; END; END; PROCEDURE endDataFileResyncForStandby IS BEGIN checkResync; -- check if there are any datafiles in rcvcat that the client did not check IF (this_ckp_scn > last_full_ckp_scn AND dfRec.file# < MAXNUMVAL) THEN deb('endDataFileResyncForStandby - dropped file# > ' || dfRec.file#); -- close the cursor first; raise_application_error does not return IF dfQ%ISOPEN THEN CLOSE dfQ; END IF; raise_application_error(-20079, 'full resync from primary database is not done'); END IF; -- set the state variable to indicate that datafile resync is done dfRec.file# := NULL; -- update high_df_recid for the next resync UPDATE node SET high_df_recid = last_df_recid WHERE node.site_key = this_site_key; last_df_recid := NULL; END; FUNCTION beginDataFileResync( high_df_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN checkResync; IF (tsRec.ts# IS NOT NULL) THEN raise_application_error(-20041, 'Tablespace resync not completed'); END IF; SELECT high_df_recid INTO last_df_recid FROM node WHERE site_key = this_site_key; IF (high_df_recid = last_df_recid) THEN deb('beginDataFileResync - Resync of datafiles not needed'); RETURN FALSE; ELSIF (high_df_recid > last_df_recid OR last_df_recid IS NULL) THEN deb('beginDataFileResync - Catalog df_recid: '||nvl(last_df_recid, 0)); last_df_recid := high_df_recid; OPEN dfQ; fetchDf; -- do priming read last_file# := -1; -- initialize for ordering assert if resync_reason = RESYNC_REASON_DF then fullResyncAction.valid := TRUE; fullResyncAction.active := TRUE; fullResyncAction.objtype := RESYNC_OBJECT_DATAFILE; else fullResyncAction.active := FALSE; end if; RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END beginDataFileResync; PROCEDURE checkDataFile(file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, stop_scn IN NUMBER, read_only IN NUMBER, stop_time IN DATE DEFAULT NULL, rfile# IN NUMBER DEFAULT NULL, aux_fname IN VARCHAR2 DEFAULT NULL, foreign_dbid IN number DEFAULT 0, foreign_create_scn IN number DEFAULT 0, foreign_create_time IN date DEFAULT NULL, plugged_readonly IN varchar2 DEFAULT 'NO', plugin_scn IN number DEFAULT 0, plugin_reset_scn IN number DEFAULT 0, plugin_reset_time IN date DEFAULT NULL, create_thread IN number DEFAULT NULL, create_size IN number DEFAULT NULL) IS local_df_key NUMBER; changedauxname boolean; BEGIN IF (dfRec.file# IS NULL) THEN -- assert beginDataFileResync was called raise_application_error(-20050, 'Datafile resync not started'); END IF; IF (last_file# >= file#) THEN -- assert rows passed in ascending raise_application_error(-20036, 'Invalid record order'); END IF; last_file# := file#; -- for checking next call IF (plugged_readonly = 'NO' AND create_scn > this_ckp_scn) THEN raise_application_error(-20052, 'Invalid datafile create SCN'); ELSIF (plugged_readonly = 'YES' AND plugin_scn > this_ckp_scn) THEN raise_application_error(-20055, 'Invalid datafile plugin SCN'); END IF; -- if the datafile in dfRec has a lower file# than the datafile -- we are currently checking, it means that the datafile in dfRec has been -- dropped (with its tablespace) and the file# is not currently in use. -- We mark the file dropped at ckp_scn - 1 since we cannot find out the -- exact drop scn.
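-- (The merge below relies on both inputs ascending by file#: the dfQ
-- cursor returns the catalog rows in file# order, and the last_file#
-- assertion above rejects out-of-order controlfile rows, so one forward
-- pass over both sequences suffices.)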
WHILE (file# > dfRec.file#) LOOP deb('checkDatafile - dropping file#: '||to_char(dfRec.file#)); dropDf(dfRec.file#, dfRec.create_scn, dfRec.plugin_scn, this_ckp_scn, this_ckp_time); incResyncActions(RESYNC_ACTION_DROP, dfRec.file#, dfRec.fname); fetchDf; END LOOP; IF (file# < dfRec.file#) THEN -- this datafile is new and must be inserted into rcvcat deb('checkDatafile - adding file#: '||to_char(file#)); addDF(file#, fname, create_time, create_scn, blocks, block_size, ts#, stop_scn, stop_time, read_only, rfile#, foreign_dbid, foreign_create_scn, foreign_create_time, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, create_thread, create_size); -- set the datafile clone name (alias aux_name) -- Note that if RMAN is 9.0 or greater then aux_fname cannot be NULL. -- So, if aux_fname is NULL we will not change it. IF (aux_fname is not NULL) THEN setCloneName(file#, create_scn, aux_fname, NULL, changedauxname, plugin_scn); END IF; incResyncActions(RESYNC_ACTION_ADD, file#, fname); ELSE -- (file# = dfRec.file#) IF (create_scn = dfRec.create_scn AND plugin_scn = dfRec.plugin_scn) THEN -- this is an existing datafile which is already recorded in rcvcat -- check that create_time and ts# match IF (create_time <> dfRec.create_time) THEN raise_application_error(-20053, 'Invalid datafile create time'); END IF; IF (ts# <> dfRec.ts#) THEN raise_application_error(-20054, 'Invalid datafile ts#'); END IF; SELECT df_key INTO local_df_key FROM df WHERE file# = checkDataFile.file# AND create_scn = checkDataFile.create_scn AND plugin_scn = checkDataFile.plugin_scn AND decode(foreign_dbid, 0, checkDataFile.foreign_dbid, foreign_dbid) = checkDataFile.foreign_dbid AND ts# = checkDataFile.ts# AND dbinc_key = this_dbinc_key; -- if create_size or create_thread is not known, update these -- values now for all entries in df for this datafile, only once. IF ((create_thread is not null AND dfRec.create_thread is null) OR (create_size is not null AND dfRec.create_size is null)) THEN UPDATE df SET create_thread = checkDataFile.create_thread, create_size = checkDataFile.create_size WHERE df.df_key = local_df_key; END IF; -- if fname has changed then update dfatt -- mark datafile copies with same name deleted too### IF (fname <> dfRec.fname OR dfRec.fname is NULL) THEN -- If the new datafile name is the same as the clone_name for -- the datafile, then set the cloneName to be the old filename. -- We presume that the old filename is a valid file and is suitable -- for use as the clone name because it was suitable as the real -- filename up until now. IF (fname = dfRec.clone_fname and dfRec.fname is not null) THEN deb('checkDatafile - new datafilename is old auxname'); setCloneName(dfRec.file#, dfRec.create_scn, dfRec.fname, dfRec.clone_fname, changedauxname, dfRec.plugin_scn); END IF; incResyncActions(RESYNC_ACTION_RENAME, dfRec.file#, fname); UPDATE site_dfatt SET fname = checkDataFile.fname WHERE site_key = this_site_key AND df_key = local_df_key; -- if no rows were updated by the above update cmd, it means the file -- name at this site is not known to the catalog. So add one here. IF sql%rowcount = 0 THEN INSERT INTO site_dfatt (df_key, fname, site_key) VALUES(local_df_key, checkDataFile.fname, this_site_key); END IF; END IF; -- bug 9067641: foreign_dbid is set to 0 during upgrade, update it -- when we know the correct foreign_dbid.
IF foreign_dbid <> 0 AND dfrec.foreign_dbid = 0 THEN UPDATE df SET foreign_dbid = checkDataFile.foreign_dbid WHERE df.df_key = local_df_key; deb('checkDatafile - foreign_dbid for file#.df_key('|| local_df_key || ') changed to ' || checkDataFile.foreign_dbid); END IF; -- If the stop SCN or blocks have changed, update the df record. IF ((blocks <> dfrec.blocks) OR (stop_scn <> dfrec.stop_scn) OR (stop_scn IS NULL AND dfrec.stop_scn IS NOT NULL) OR (stop_scn IS NOT NULL AND dfrec.stop_scn IS NULL)) THEN IF blocks <> dfRec.blocks THEN deb('checkDatafile - size changed for file#: '|| to_char(file#)||' from '||to_char(dfRec.blocks)||' to '|| to_char(blocks)); incResyncActions(RESYNC_ACTION_RESIZE, dfRec.file#, fname); ELSE deb('checkDatafile - stopSCN changed for file#: '|| to_char(file#)||' from '|| nvl(to_char(dfRec.stop_scn), 'NULL')||' to '|| nvl(to_char(checkDatafile.stop_scn), 'NULL')); incResyncActions(RESYNC_ACTION_CHANGE, dfRec.file#, fname); END IF; UPDATE df SET stop_scn = checkDataFile.stop_scn, stop_time = checkDataFile.stop_time, read_only = checkDataFile.read_only, blocks = checkDataFile.blocks WHERE df.dbinc_key = this_dbinc_key AND df.file# = dfRec.file# AND df.create_scn = dfRec.create_scn AND df.plugin_scn = dfRec.plugin_scn; ELSE deb('checkDatafile - stopSCN remains the same for file#: '|| to_char(file#)); END IF; -- If the aux_fname has changed, update the df record. -- Note that if RMAN is 9.0 or greater then aux_fname cannot be NULL. IF (aux_fname is not NULL) THEN setCloneName(dfRec.file#, dfRec.create_scn, aux_fname, dfRec.clone_fname, changedauxname, dfRec.plugin_scn); IF changedauxname THEN incResyncActions(RESYNC_ACTION_CHANGE, dfRec.file#, fname); END IF; END IF; ELSIF ((case when plugged_readonly = 'NO' then create_scn else plugin_scn end) > (case when dfRec.plugged_readonly = 'NO' then dfRec.create_scn else dfRec.plugin_scn end)) THEN -- this datafile has been recreated or has been converted from -- plugged read only to read write. -- We could probably assert that the tablespace that contained the old -- incarnation of this datafile has been dropped, but I would -- rather not be that strict. -- The old incarnation of the datafile must have been dropped before the -- new incarnation was created, so mark the old one dropped at -- create_scn. This guarantees that it never appears that two -- incarnations of the same datafile existed at the same scn time. deb('checkDatafile - file#: '||to_char(file#)||' recreated'); dropDf(dfRec.file#, dfRec.create_scn, dfRec.plugin_scn, this_ckp_scn, this_ckp_time); addDf(file#, fname, create_time, create_scn, blocks, block_size, ts#, stop_scn, stop_time, read_only, rfile#, foreign_dbid, foreign_create_scn, foreign_create_time, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, create_thread, create_size); incResyncActions(RESYNC_ACTION_RECREATE, dfRec.file#, fname); ELSE -- (create_scn < dfRec.create_scn) -- The client passed us a create SCN for this datafile that is -- less than the SCN in the rcvcat. I.e., the target database -- controlfile now contains a previous incarnation of this datafile. -- This can happen only if the user has some old controlfile, or -- has done a resetlogs and not told us about it.
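-- (The error raised below depends on how the file entered the database:
-- ORA-20052, invalid create SCN, for locally created datafiles, and
-- ORA-20055, invalid plugin SCN, for transported, plugged-in read-only
-- datafiles.)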
IF (plugged_readonly = 'NO') THEN raise_application_error(-20052, 'Invalid datafile create SCN'); ELSE raise_application_error(-20055, 'Invalid datafile plugin SCN'); END IF; END IF; fetchDF; -- get next row from DF cursor END IF; -- (file# < dfRec.file#) END checkDataFile; PROCEDURE endDataFileResync IS BEGIN checkResync; -- check if there are any datafiles in rcvcat that the client did not check WHILE (dfRec.file# < MAXNUMVAL) LOOP -- if we ever allow drop datafile, replace error signalling with dropDf dropDf(dfRec.file#, dfRec.create_scn, dfRec.plugin_scn, this_ckp_scn, this_ckp_time); begin incResyncActions(RESYNC_ACTION_DROP, dfRec.file#, dfRec.fname); exception when others then deb('endDataFileResync - caught exception ' || substr(sqlerrm, 1, 132)); end; fetchDf; END LOOP; -- set the state variable to indicate that datafile resync is done dfRec.file# := NULL; -- update high_df_recid for the next resync UPDATE node SET high_df_recid = last_df_recid WHERE site_key = this_site_key; last_df_recid := NULL; END endDataFileResync; /*-----------------* * Tempfile Resync * *-----------------*/ PROCEDURE fetchTf IS -- private to package body BEGIN -- if already fetched everything, just return IF tfRec.file# = MAXNUMVAL THEN return; END IF; FETCH tfQ INTO tfRec; IF tfQ%NOTFOUND THEN tfRec.file# := MAXNUMVAL; -- indicate end-of-fetch CLOSE tfQ; END IF; END fetchTf; PROCEDURE addTf(file# IN NUMBER, -- private to package body fname IN VARCHAR2, create_time IN DATE, create_scn IN NUMBER, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, autoextend IN VARCHAR2, max_size IN NUMBER, next_size IN NUMBER) IS ts_create_scn NUMBER; local_tf_key NUMBER; BEGIN BEGIN SELECT create_scn INTO ts_create_scn FROM ts WHERE ts.dbinc_key = this_dbinc_key AND ts.ts# = addTf.ts# AND ts.drop_scn IS NULL; -- in case ts numbers are reused EXCEPTION WHEN no_data_found THEN -- if this tempfile's tablespace was never resynced from the primary, -- then we won't resync the tempfile info from the standby, because we -- don't do tablespace resync from a standby cf. IF (this_cf_type = 'STANDBY' AND this_db_unique_name is not null) THEN RETURN; END IF; END; -- If the tempfile is already known to the catalog, get its tf_key; -- otherwise, assign a new tf_key BEGIN SELECT distinct tf_key INTO local_tf_key FROM tf, dbinc WHERE file# = addTf.file# AND create_scn = addTf.create_scn AND (create_time = addTf.create_time or create_time is null AND addTf.create_time is null) AND ts# = addTf.ts# AND rfile# = addTf.rfile# AND tf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key; EXCEPTION WHEN no_data_found THEN SELECT rman_seq.nextval INTO local_tf_key FROM dual; END; BEGIN INSERT INTO tf(dbinc_key, file#, create_scn, create_time, ts#, ts_create_scn, block_size, rfile#, tf_key) VALUES(this_dbinc_key, file#, create_scn, create_time, ts#, ts_create_scn, block_size, rfile#, local_tf_key); EXCEPTION WHEN dup_val_on_index THEN -- If create_scn = 0, it must be a pre-10gR2 tempfile where create_scn -- was not set. We can only track one tempfile with create_scn as 0 -- in the current incarnation. Hence, update the old ones.
IF create_scn = 0 THEN UPDATE tf SET create_time = addTf.create_time, ts# = addTf.ts#, ts_create_scn = addTf.ts_create_scn, block_size = addTf.block_size, rfile# = addTf.rfile# WHERE dbinc_key = this_dbinc_key AND file# = addTf.file# AND create_scn= addTf.create_scn; END IF; -- we can see an existing entry for a tempfile in a Data Guard environment -- when resyncing from the primary in the following cases: -- 1) when the current primary has a tempfile that was dropped from the old -- primary, i.e. the standby cf has the tempfile and switchover was done -- after the tempfile was dropped from the old primary. -- 2) when a primary created a tempfile that re-used the file number, -- which marked the tempfile from the old primary as dropped. Now, after -- switching over back again, we find during resync the tempfile in the -- new primary that was marked dropped earlier. -- In these cases, we want to mark the temp file as undropped, which is -- done below by marking drop_scn and drop_time in site_tfatt as null. END; -- Note that we have only one row per tf_key in site_tfatt as we track only -- the latest changes at a site. So, the row, if it already exists, should -- reflect any changes. Also, mark the file as not dropped. BEGIN INSERT INTO site_tfatt(tf_key, fname, site_key, blocks, autoextend, max_size, next_size) VALUES(local_tf_key, fname, this_site_key, nvl(addTf.blocks, 0), addTf.autoextend, addTf.max_size, addTf.next_size); -- mark tempfile copies with same name deleted too### EXCEPTION WHEN dup_val_on_index THEN -- Update the existing record. UPDATE site_tfatt SET fname = addTf.fname, blocks = nvl(addTf.blocks, 0), autoextend = addTf.autoextend, max_size = addTf.max_size, next_size = addTf.next_size, drop_scn = NULL, drop_time = NULL WHERE site_key = this_site_key AND tf_key = local_tf_key; END; END addTf; PROCEDURE dropTf( -- private to package body tf_key IN NUMBER ,drop_scn IN NUMBER ,drop_time IN DATE ) IS BEGIN UPDATE site_tfatt SET drop_scn = dropTf.drop_scn, drop_time = dropTf.drop_time WHERE this_site_key = site_key AND tf_key = dropTf.tf_key; END dropTf; FUNCTION tempFileToResync( high_tf_recid IN NUMBER ) RETURN BOOLEAN IS tf_recid number; BEGIN checkResync; SELECT high_tf_recid INTO tf_recid FROM node WHERE site_key = this_site_key; IF (high_tf_recid = tf_recid) THEN RETURN FALSE; ELSIF (high_tf_recid > tf_recid OR tf_recid IS NULL) THEN RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END tempFileToResync; -- The tempfile resync is unified with bug fix 6653570. So, the calls -- for standby tempfile resync are just a wrapper over the existing -- tempfile resync code for the primary database. FUNCTION beginTempFileResyncForStandby( high_tf_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN RETURN beginTempFileResync (high_tf_recid); END beginTempFileResyncForStandby; -- Tempfiles are not the same as datafiles on the primary and standby. A -- user can add new temp files at the primary database only when it is open, -- and there is no "add datafile" redo generated for this action. However, -- when a new temporary tablespace is created, "add tablespace" redo is -- generated, which is propagated to the standby as well. Any new temp files -- added to the primary are not propagated to the standby. Only the names -- already existing in the primary control file are created at the standby -- during standby creation. The file name convert parameters apply to the -- temp files at the standby.
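-- Hence the standby entry points below are thin wrappers:
-- beginTempFileResyncForStandby calls beginTempFileResync,
-- checkTempFileForStandby calls checkTempFile, and
-- endTempFileResyncForStandby calls endTempFileResync.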
PROCEDURE checkTempFileForStandby (file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, autoextend IN VARCHAR2, max_size IN NUMBER, next_size IN NUMBER) IS local_tf_key NUMBER; BEGIN checkTempFile(file#, fname, create_scn, create_time, blocks, block_size, ts#, rfile#, autoextend, max_size, next_size); END checkTempFileForStandby; PROCEDURE endTempFileResyncForStandby IS BEGIN endTempFileResync; END endTempFileResyncForStandby; FUNCTION beginTempFileResync( high_tf_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN checkResync; IF (tsRec.ts# IS NOT NULL) THEN raise_application_error(-20041, 'Tablespace resync not completed'); END IF; SELECT high_tf_recid INTO last_tf_recid FROM node WHERE site_key = this_site_key; IF (high_tf_recid = last_tf_recid) THEN deb('beginTempFileResync - Resync of tempfiles not needed'); RETURN FALSE; ELSIF (high_tf_recid > last_tf_recid OR last_tf_recid IS NULL) THEN deb('beginTempFileResync - Catalog tf_recid: '||nvl(last_tf_recid, 0)); last_tf_recid := high_tf_recid; OPEN tfQ; fetchTf; -- do priming read last_file# := -1; -- initialize for ordering assert if resync_reason = RESYNC_REASON_TF then fullResyncAction.active := TRUE; fullResyncAction.valid := TRUE; fullResyncAction.objtype := RESYNC_OBJECT_TEMPFILE; else fullResyncAction.active := FALSE; end if; RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END beginTempFileResync; PROCEDURE checkTempFile(file# IN NUMBER, fname IN VARCHAR2, create_scn IN NUMBER, create_time IN DATE, blocks IN NUMBER, block_size IN NUMBER, ts# IN NUMBER, rfile# IN NUMBER, autoextend IN VARCHAR2, max_size IN NUMBER, next_size IN NUMBER) IS local_tf_key NUMBER; BEGIN IF (tfRec.file# IS NULL) THEN -- assert beginTempFileResync was called raise_application_error(-20050, 'Tempfile resync not started'); END IF; IF (last_file# >= file#) THEN -- assert rows passed in ascending raise_application_error(-20036, 'Invalid record order'); END IF; last_file# := file#; -- for checking next call --Bug 5939669: DBPITR can result in tempfile create_scn > ckp_scn --IF (create_scn > this_ckp_scn) THEN -- raise_application_error(-20052, 'Invalid tempfile create SCN'); --END IF; -- if the tempfile in tfRec has a lower file# than the tempfile -- we are currently checking, it means that the tempfile in tfRec has been -- dropped (with its tablespace) and the file# is not currently in use. -- We mark the file dropped at ckp_scn - 1 since we cannot find out the -- exact drop scn.
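-- (Note that dropTf, unlike dropDf, only stamps drop_scn/drop_time on
-- this site's site_tfatt row, so a tempfile marked dropped at one site
-- can remain current at another; addTf clears the stamps again if the
-- file reappears.)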
WHILE (file# > tfRec.file#) LOOP dropTf(tfRec.tf_key, this_ckp_scn, this_ckp_time); incResyncActions(RESYNC_ACTION_DROP, tfRec.file#, tfRec.fname); fetchTf; END LOOP; IF (file# < tfRec.file#) THEN addTf(file#, fname, create_time, create_scn, blocks, block_size, ts#, rfile#, autoextend, max_size, next_size); incResyncActions(RESYNC_ACTION_ADD, file#, fname); ELSE -- (file# = tfRec.file#) IF (create_scn = 0) THEN addTf(file#, fname, create_time, create_scn, blocks, block_size, ts#, rfile#, autoextend, max_size, next_size); incResyncActions(RESYNC_ACTION_CHANGE, file#, fname); ELSIF (create_scn = tfRec.create_scn) THEN -- this is an existing tempfile which is already recorded in rcvcat -- check that create_time and ts# match IF (create_time <> tfRec.create_time) THEN raise_application_error(-20053, 'Invalid tempfile create time'); END IF; IF (ts# <> tfRec.ts#) THEN raise_application_error(-20054, 'Invalid tempfile ts#'); END IF; -- apply attribute changes, if any addTf(file#, fname, create_time, create_scn, blocks, block_size, ts#, rfile#, autoextend, max_size, next_size); IF (fname <> tfRec.fname OR tfRec.fname is NULL) THEN incResyncActions(RESYNC_ACTION_RENAME, file#, fname); END IF; -- if blocks/autoextend/max_size/next_size have changed then -- update tf. IF (blocks <> tfrec.blocks OR autoextend <> tfrec.autoextend OR max_size <> tfrec.max_size OR next_size <> tfrec.next_size ) THEN IF blocks <> tfrec.blocks THEN incResyncActions(RESYNC_ACTION_RESIZE, file#, fname); ELSE incResyncActions(RESYNC_ACTION_CHANGE, file#, fname); END IF; END IF; ELSE -- (create_scn <> tfRec.create_scn) -- this tempfile has been recreated -- the server ignores creation_change# for tempfiles, so should RMAN -- Just mark the old one as dropped and use the new one as part of the -- current set of temp files. This guarantees that it never appears that -- two incarnations of the same tempfile existed at the same scn time.
dropTf(tfRec.tf_key, create_scn, create_time); addTf(file#, fname, create_time, create_scn, blocks, block_size, ts#, rfile#, autoextend, max_size, next_size); incResyncActions(RESYNC_ACTION_RECREATE, file#, fname); END IF; fetchTf; -- get next row from Tf cursor END IF; -- (file# = tfRec.file#) END checkTempFile; PROCEDURE endTempFileResync IS BEGIN checkResync; -- check if there are any tempfiles in rcvcat that the client did not check deb('endTempFileResync - entering with tempfile number '||tfRec.file#); WHILE (tfRec.file# < MAXNUMVAL) LOOP dropTf(tfRec.tf_key, this_ckp_scn, this_ckp_time); incResyncActions(RESYNC_ACTION_DROP, tfRec.file#, tfRec.fname); fetchTf; deb('endTempFileResync - dropping tempfile '||tfRec.file#); END LOOP; -- set the state variable to indicate that tempfile resync is done tfRec.file# := NULL; -- update high_tf_recid for the next resync UPDATE node SET high_tf_recid = last_tf_recid WHERE site_key = this_site_key; last_tf_recid := NULL; END endTempFileResync; /*---------------------* * Redo Thread resync * *---------------------*/ PROCEDURE fetchRt IS BEGIN FETCH rtQ INTO rtRec; IF rtQ%NOTFOUND THEN rtRec.thread# := MAXNUMVAL; CLOSE rtQ; END IF; END fetchRt; PROCEDURE addRt( thread# IN NUMBER ,last_sequence# IN NUMBER ,enable_scn IN NUMBER ,enable_time IN DATE ,disable_scn IN NUMBER ,disable_time IN DATE ,status IN VARCHAR2 ) IS BEGIN INSERT INTO rt (dbinc_key, thread#, sequence#, enable_scn, enable_time, disable_scn, disable_time, status) VALUES (this_dbinc_key, thread#, last_sequence#, enable_scn, enable_time, disable_scn, disable_time, status); END addRt; PROCEDURE dropRt(thread# IN NUMBER) IS BEGIN -- update rt set drop_ckp_key = this_ckp_key DELETE FROM rt WHERE rt.dbinc_key = this_dbinc_key AND rt.thread# = dropRt.thread#; END dropRt; FUNCTION beginThreadResync( high_rt_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN checkResync; SELECT high_rt_recid INTO last_rt_recid FROM node WHERE site_key = this_site_key; IF (high_rt_recid = last_rt_recid) THEN deb('beginThreadResync - Resync of redo threads not needed'); RETURN FALSE; ELSIF (high_rt_recid > last_rt_recid OR last_rt_recid IS NULL) THEN deb('beginThreadResync - Catalog rt_recid: '||nvl(last_rt_recid, 0)); last_rt_recid := high_rt_recid; OPEN rtQ; fetchRt; -- do priming read last_thread# := -1; if resync_reason = RESYNC_REASON_THR then fullResyncAction.valid := TRUE; fullResyncAction.active := TRUE; fullResyncAction.objtype := RESYNC_OBJECT_REDOTHREAD; else fullResyncAction.active := FALSE; end if; RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END beginThreadResync; PROCEDURE checkThread( thread# IN NUMBER ,last_sequence# IN NUMBER ,enable_scn IN NUMBER ,enable_time IN DATE ,disable_scn IN NUMBER ,disable_time IN DATE ,status IN VARCHAR2 ) IS BEGIN IF (rtRec.thread# IS NULL) THEN raise_application_error(-20061, 'Thread resync not started'); END IF; IF (last_thread# >= thread#) THEN raise_application_error(-20036, 'Invalid record order'); END IF; last_thread# := thread#; WHILE (thread# > rtRec.thread#) LOOP -- if we get here the thread has disappeared from the controlfile -- this can happen only if the controlfile is recreated -- mark the thread as dropped in the rcvcat dropRt(rtRec.thread#); incResyncActions(RESYNC_ACTION_DROP, rtRec.thread#, to_char(NULL)); fetchRt; END LOOP; IF (thread# < rtRec.thread#) THEN -- this thread is new and must be inserted into rcvcat addRt(thread#, last_sequence#, enable_scn, enable_time, disable_scn, disable_time, status);
incResyncActions(RESYNC_ACTION_ADD, thread#, to_char(NULL)); ELSE -- (thread# = rtRec.thread#) -- this is an existing thread, just update the information UPDATE rt SET sequence# = checkThread.last_sequence#, enable_scn = checkThread.enable_scn, enable_time = checkThread.enable_time, disable_scn = checkThread.disable_scn, disable_time = checkThread.disable_time, status = checkThread.status WHERE rt.dbinc_key = this_dbinc_key AND rt.thread# = checkThread.thread#; incResyncActions(RESYNC_ACTION_CHANGE, rtRec.thread#, to_char(NULL)); fetchRt; END IF; END checkThread; PROCEDURE endThreadResync IS BEGIN WHILE (rtRec.thread# < MAXNUMVAL) LOOP -- if we get here the thread has disappeared from the controlfile -- this can happen only if the controlfile is recreated -- mark the thread as dropped in the rcvcat dropRt(rtRec.thread#); fetchRt; END LOOP; rtRec.thread# := NULL; -- update high_rt_recid for the next resync UPDATE node SET high_rt_recid = last_rt_recid WHERE site_key = this_site_key; last_rt_recid := NULL; END endThreadResync; /*------------------------* * Online Redo Log resync * *------------------------*/ -- String compare independent of NLS_COMP. Assumes the recover.txt -- cursor also uses NLS_COMP=ANSI and NLS_SORT=ASCII7. -- This is used in 1) online redo log resync 2) guaranteed restore point -- resync and 3) standby redo log resync -- -- Returns 0 if n1 = n2 -- 1 if n1 > n2 -- -1 if n1 < n2 -- NULL if n1 or n2 is NULL -- FUNCTION nlsnamecmp(n1 IN varchar2, n2 IN varchar2) RETURN NUMBER IS CURSOR nlsnamecmp_c(n1 varchar2, n2 varchar2) IS SELECT name FROM (SELECT n1 name FROM dual UNION ALL SELECT n2 name FROM dual) ORDER BY nlssort(name, 'NLS_COMP=ANSI NLS_SORT=ASCII7'); ln1 varchar2(1024); ln2 varchar2(1024); BEGIN if (n1 is null or n2 is null) then return null; elsif (n1 = n2) then return 0; elsif (n1 = chr(1) or n2 = chr(255)) then return -1; elsif (n2 = chr(1) or n1 = chr(255)) then return 1; end if; open nlsnamecmp_c(n1, n2); fetch nlsnamecmp_c into ln1; fetch nlsnamecmp_c into ln2; close nlsnamecmp_c; if (ln1 = n1) then return -1; end if; return 1; END nlsnamecmp; PROCEDURE fetchOrl IS BEGIN FETCH orlQ INTO orlRec; IF orlQ%NOTFOUND THEN orlRec.fname := chr(255); -- assume chr(255) is greater than any name CLOSE orlQ; END IF; END fetchOrl; PROCEDURE addOrl( thread# IN NUMBER ,group# IN NUMBER ,fname IN VARCHAR2 ,bytes IN NUMBER ,type IN VARCHAR2 ) IS thread_not_found EXCEPTION; PRAGMA EXCEPTION_INIT(thread_not_found, -2291); BEGIN INSERT INTO orl (dbinc_key, thread#, group#, fname, bytes, type, site_key) VALUES (this_dbinc_key, thread#, group#, fname, bytes, type, this_site_key); EXCEPTION WHEN thread_not_found THEN -- Standby redo logs are not cleared during activation, hence they can -- belong to non-existing or unknown threads. bug 9289630.
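-- (So a missing thread is a hard error for ONLINE logs, while a standby
-- redo log that references an unknown thread is skipped with only a
-- debug message.)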
IF type <> 'STANDBY' THEN raise_application_error(-20079, 'full resync from primary database is not done'); ELSE deb('ignored resync of standby redo log ' || fname); END IF; END addOrl; PROCEDURE dropOrl(fname IN VARCHAR2) IS BEGIN -- update orl set drop_ckp_key = this_ckp_key DELETE FROM orl WHERE orl.dbinc_key = this_dbinc_key AND orl.site_key = this_site_key AND orl.fname = dropOrl.fname; END dropOrl; FUNCTION beginOnlineRedoLogResync( high_orl_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN checkResync; SELECT high_orl_recid INTO last_orl_recid FROM node WHERE site_key = this_site_key; IF (high_orl_recid = last_orl_recid) THEN deb('beginOnlineRedoLogResync - Resync of online logs not needed'); RETURN FALSE; ELSIF (high_orl_recid > last_orl_recid OR last_orl_recid IS NULL) THEN deb('beginOnlineRedoLogResync - Catalog orl_recid: '|| nvl(last_orl_recid, 0)); last_orl_recid := high_orl_recid; OPEN orlQ; fetchOrl; last_fname := chr(1); -- assume chr(1) is less than any name if resync_reason = RESYNC_REASON_ORL then fullResyncAction.active := TRUE; fullResyncAction.valid := TRUE; fullResyncAction.objtype := RESYNC_OBJECT_ONLINELOG; else fullResyncAction.active := FALSE; end if; RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END beginOnlineRedoLogResync; PROCEDURE checkOnlineRedoLog( thread# IN NUMBER ,group# IN NUMBER ,fname IN VARCHAR2 ,bytes IN NUMBER DEFAULT NULL ,type IN VARCHAR2 DEFAULT 'ONLINE' ) IS BEGIN IF (orlRec.fname IS NULL) THEN raise_application_error(-20061, 'Redo resync not started'); END IF; IF (nlsnamecmp(last_fname, fname) >= 0) THEN raise_application_error(-20036, 'Invalid record order'); END IF; last_fname := fname; WHILE (nlsnamecmp(fname, orlRec.fname) > 0) LOOP -- if we get here the online log has disappeared from the controlfile -- this can happen only if the controlfile is recreated -- mark the online log as dropped in the rcvcat dropOrl(orlRec.fname); incResyncActions(RESYNC_ACTION_DROP, to_number(NULL), orlRec.fname); fetchOrl; END LOOP; IF (nlsnamecmp(fname, orlRec.fname) < 0) THEN -- this online log is new and must be inserted into rcvcat addOrl(thread#, group#, fname, bytes, type); incResyncActions(RESYNC_ACTION_ADD, to_number(NULL), fname); ELSE -- (fname = orlRec.fname) UPDATE orl SET thread# = checkOnlineRedoLog.thread#, group# = checkOnlineRedoLog.group#, bytes = checkOnlineRedoLog.bytes, type = checkOnlineRedoLog.type WHERE orl.dbinc_key = this_dbinc_key AND orl.fname = checkOnlineRedoLog.fname AND orl.site_key = this_site_key; incResyncActions(RESYNC_ACTION_CHANGE, to_number(NULL), orlRec.fname); fetchOrl; END IF; END checkOnlineRedoLog; PROCEDURE endOnlineRedoLogResync IS BEGIN WHILE (orlRec.fname != chr(255)) LOOP -- if we get here the online log has disappeared from the controlfile -- this can happen only if the controlfile is recreated -- mark the online log as dropped in the rcvcat dropOrl(orlRec.fname); incResyncActions(RESYNC_ACTION_DROP, to_number(NULL), orlRec.fname); fetchOrl; END LOOP; orlRec.fname := NULL; -- update high_orl_recid for the next resync UPDATE node SET high_orl_recid = last_orl_recid WHERE site_key = this_site_key; last_orl_recid := NULL; END endOnlineRedoLogResync; /*---------------------------------* * Guaranteed restore point Resync * *---------------------------------*/ PROCEDURE fetchGrsp IS BEGIN FETCH grspQ INTO grspRec; IF grspQ%NOTFOUND THEN grspRec.rspname := chr(255); -- assume chr(255) is greater than any name CLOSE grspQ; END IF; END fetchGrsp; PROCEDURE addGrsp( rspname IN VARCHAR2 ,from_scn IN
NUMBER ,to_scn IN NUMBER ,dbinc_key IN NUMBER ,create_time IN DATE ,rsp_time IN DATE ,guaranteed IN VARCHAR2 ) IS BEGIN INSERT INTO grsp (dbinc_key, rspname, from_scn, to_scn, creation_time, rsptime, guaranteed, site_key) VALUES (dbinc_key, rspname, from_scn, to_scn, create_time, rsp_time, guaranteed, this_site_key); END addGrsp; PROCEDURE dropGrsp( rspname IN VARCHAR2) IS BEGIN DELETE FROM grsp WHERE grsp.rspname = dropGrsp.rspname AND grsp.site_key = this_site_key; END dropGrsp; FUNCTION beginGuaranteedRPResync( high_grsp_recid IN NUMBER ) RETURN BOOLEAN IS BEGIN checkResync; SELECT node.high_grsp_recid INTO last_grsp_recid FROM node WHERE site_key = this_site_key; IF (high_grsp_recid = last_grsp_recid) THEN RETURN FALSE; ELSIF (high_grsp_recid > last_grsp_recid OR last_grsp_recid IS NULL) THEN last_grsp_recid := high_grsp_recid; OPEN grspQ; fetchGrsp; last_rspname := chr(1); -- assume chr(1) is less than any name RETURN TRUE; ELSE raise_application_error(-20035, 'Invalid high recid'); END IF; END beginGuaranteedRPResync; PROCEDURE checkGuaranteedRP( rspname IN VARCHAR2 ,from_scn IN NUMBER ,to_scn IN NUMBER ,resetlogs_change# IN NUMBER ,resetlogs_time IN DATE ,create_time IN DATE DEFAULT NULL ,rsp_time IN DATE DEFAULT NULL ,guaranteed IN VARCHAR2 DEFAULT 'YES' ) IS dbinc_key number; BEGIN IF (grspRec.rspname IS NULL) THEN raise_application_error(-20099, 'restore point resync not started'); END IF; IF (nlsnamecmp(last_rspname, rspname) >= 0) THEN raise_application_error(-20036, 'Invalid record order'); END IF; last_rspname := rspname; dbinc_key := checkIncarnation(resetlogs_change#, resetlogs_time); WHILE (nlsnamecmp(rspname, grspRec.rspname) > 0) LOOP -- if we get here the guaranteed restore point has disappeared from the -- controlfile; this can happen only if the controlfile is recreated -- mark the restore point as dropped in the rcvcat dropGrsp(grspRec.rspname); fetchGrsp; END LOOP; IF (nlsnamecmp(rspname, grspRec.rspname) < 0) THEN -- this restore point is new and must be inserted into rcvcat addGrsp(rspname, from_scn, to_scn, dbinc_key, create_time, rsp_time, guaranteed); ELSE -- (rspname = grspRec.rspname) -- this is an existing restore point, just update the information -- (qualify the local variable; a bare dbinc_key on the right-hand side -- would resolve to the column and make the assignment a no-op) UPDATE grsp SET from_scn = checkGuaranteedRP.from_scn, to_scn = checkGuaranteedRP.to_scn, rsptime = checkGuaranteedRP.rsp_time, guaranteed = checkGuaranteedRP.guaranteed, dbinc_key = checkGuaranteedRP.dbinc_key WHERE grsp.rspname = checkGuaranteedRP.rspname AND grsp.site_key = this_site_key; fetchGrsp; END IF; END checkGuaranteedRP; PROCEDURE endGuaranteedRPResync IS BEGIN WHILE (grspRec.rspname != chr(255)) LOOP -- if we get here the restore point has disappeared from the controlfile -- this can happen only if the controlfile was recreated or the restore -- point was dropped dropGrsp(grspRec.rspname); fetchGrsp; END LOOP; grspRec.rspname := NULL; -- update high_grsp_recid for the next resync UPDATE node SET high_grsp_recid = last_grsp_recid WHERE site_key = this_site_key; last_grsp_recid := NULL; END endGuaranteedRPResync; /*-----------------------------------* * RMAN Configuration records resync * *-----------------------------------*/ FUNCTION beginConfigResync( high_conf_recid IN NUMBER ) RETURN NUMBER IS BEGIN checkResync; SELECT high_conf_recid INTO last_conf_recid FROM node WHERE site_key = this_site_key; IF (high_conf_recid = last_conf_recid) THEN RETURN CONFIGRESYNC_NO; -- no resync needed ELSIF (last_conf_recid IS NULL OR high_conf_recid > last_conf_recid) THEN last_conf_recid := high_conf_recid; RETURN CONFIGRESYNC_TORC; -- we need resync from CF
to RC ELSE last_conf_recid := high_conf_recid; RETURN CONFIGRESYNC_TOCF; -- we need resync from RC to CF END IF; END beginConfigResync; PROCEDURE endConfigResync IS BEGIN -- update high_conf_recid for the next resync UPDATE node SET high_conf_recid = last_conf_recid WHERE site_key = this_site_key; last_conf_recid := NULL; END endConfigResync; FUNCTION beginConfigResync2( high_conf_recid IN NUMBER ) RETURN NUMBER IS to_CF boolean := FALSE; to_Catalog boolean := FALSE; local_force_resync2cf VARCHAR2(3) := 'NO'; curr_cf_version_time DATE; conf_count NUMBER; BEGIN checkResync; SELECT node.high_conf_recid, node.force_resync2cf, cf_create_time INTO last_conf_recid, local_force_resync2cf, curr_cf_version_time FROM node WHERE site_key = this_site_key; -- If local_force_resync2cf is set then we have to resync into the -- controlfile. IF (local_force_resync2cf = 'YES') THEN to_CF := TRUE; END IF; -- If this is the first time the site is exposed to the catalog, always -- get the existing configuration from the control file. IF (last_cf_version_time is NULL) THEN SELECT COUNT(*) INTO conf_count FROM CONF WHERE site_key = this_site_key; IF conf_count = 0 THEN to_Catalog := TRUE; END IF; END IF; -- If this is not the same as the last control file... perform resync based -- on recid. For a current control file, trust the control file if it has a -- higher configuration recid than the catalog. For a backup control file, -- always trust the recovery catalog. IF (last_cf_version_time <> curr_cf_version_time) THEN IF (this_cf_type = 'CURRENT') THEN IF high_conf_recid > last_conf_recid THEN to_Catalog := TRUE; ELSIF (high_conf_recid < last_conf_recid) THEN to_CF := TRUE; END IF; ELSE to_CF := TRUE; END IF; END IF; -- If the recovery catalog configuration recid is smaller than the recid in -- the controlfile (that is, high_conf_recid), then we will resync from the -- controlfile to the catalog. -- Otherwise, if last_conf_recid is greater, the data will go into the -- controlfile. IF (last_cf_version_time = curr_cf_version_time) THEN IF (high_conf_recid > last_conf_recid) THEN to_Catalog := TRUE; ELSIF (high_conf_recid < last_conf_recid) THEN to_CF := TRUE; END IF; END IF; -- If no records are to be moved to or from the catalog, no resync is -- needed. IF (NOT to_Catalog AND NOT to_CF) THEN RETURN CONFIGRESYNC_NO; END IF; -- Initialize water marks for the next resync; timestamps are fixed as we -- resync individual configuration records. last_conf_recid := high_conf_recid; -- check if we need to fix only the control file IF (NOT to_Catalog AND to_CF) THEN RETURN CONFIGRESYNC_TOCF; END IF; -- check if we need to only resync to the catalog IF (to_Catalog AND NOT to_CF) THEN RETURN CONFIGRESYNC_TORC; END IF; -- Check if we need to do two way updates, i.e. to the control file and to -- the recovery catalog. IF (to_Catalog AND to_CF) THEN RETURN CONFIGRESYNC_TORC_TOCF; END IF; END beginConfigResync2; PROCEDURE endConfigResync2(sync_to_cf_pending IN boolean DEFAULT FALSE) IS db_role node.database_role%type; cf_pending number := 0; BEGIN IF sync_to_cf_pending THEN cf_pending := 1; END IF; IF (force_resync2cf = 'YES') THEN deb('endConfigResync2 - force_resync2cf = TRUE'); -- Set the bit which forces resync from cf to TRUE for all -- other nodes except this one. UPDATE node SET node.force_resync2cf = 'YES' WHERE node.db_key = this_db_key AND site_key <> this_site_key; END IF; -- Update high_conf_recid in the node table only for this -- db_unique_name. This means that we will not cause a resync for other -- nodes. -- Also, update force_resync2cf to FALSE, because we are sure that -- we have resynced everything.
UPDATE node SET node.high_conf_recid = last_conf_recid, node.force_resync2cf = decode(cf_pending, 1, 'YES', 'NO') WHERE site_key = this_site_key; deb('endConfigResync2 - last_conf_recid='||last_conf_recid); force_resync2cf := 'NO'; last_conf_recid := NULL; END endConfigResync2; PROCEDURE getConfig( conf# OUT number ,name IN OUT varchar2 ,value IN OUT varchar2 ,first IN boolean) IS eof boolean := FALSE; BEGIN -- call getConfig from the rcvman package. Note that here we call the -- recovery catalog version! dbms_rcvman.getConfig(conf#, name, value, first); END getConfig; PROCEDURE updateRestorePoint( lowscn IN NUMBER ,highscn IN NUMBER DEFAULT NULL -- next scn by another name ) IS nextscn number; refs number; BEGIN -- Default nextscn (not provided for datafile backups) IF (highscn is null) THEN nextscn := lowscn + 1; ELSE nextscn := highscn; END IF; -- Just set all the restore points in this range to NULL, for unknown. -- Routine cleanupNRS will set the proper YES/NO value. UPDATE nrsp r SET LONG_TERM = NULL WHERE r.to_scn >= lowscn AND r.to_scn <= nextscn AND r.long_term IS NOT NULL AND r.site_key = this_site_key; deb('updateRestorePoint - (lowscn ' || lowscn || ' - highscn ' || nextscn || ') rows updated ' || sql%rowcount); END updateRestorePoint; /*-------------------------* * Redo Log History resync * *-------------------------*/ FUNCTION beginLogHistoryResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN -- find the highest log history recid that has been recorded in rcvcat -- and return it to the caller SELECT high_rlh_recid INTO last_rlh_recid FROM node WHERE site_key = this_site_key; ELSE last_rlh_recid := sessionWaterMarks.high_rlh_recid; END IF; RETURN last_rlh_recid; END beginLogHistoryResync; FUNCTION getLogHistoryLowSCN RETURN NUMBER IS lowSCN number; BEGIN checkResync; SELECT nvl(max(low_scn), 0) INTO lowSCN FROM rlh WHERE rlh.dbinc_key = this_dbinc_key; RETURN lowSCN; END getLogHistoryLowSCN; PROCEDURE checkLogHistory( rlh_recid IN NUMBER ,rlh_stamp IN NUMBER ,thread# IN NUMBER ,sequence# IN NUMBER ,low_scn IN NUMBER ,low_time IN DATE ,next_scn IN NUMBER ,reset_scn IN number default NULL ,reset_time IN date default NULL ) IS local rlh%rowtype; BEGIN IF (last_rlh_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (rlh_recid < last_rlh_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (rlh_recid > last_rlh_recid + 1) THEN -- there is a gap in the log history -- not sure what we should do here NULL; END IF; last_rlh_recid := rlh_recid; IF (last_dbinc_key is NULL or reset_scn is NULL) THEN deb('checkLogHistory - Init last_dbinc_key'); last_dbinc_key := this_dbinc_key; select reset_scn, reset_time into last_reset_scn, last_reset_time from dbinc where dbinc_key = this_dbinc_key; END IF; IF (reset_scn IS NOT NULL and reset_time IS NOT NULL) THEN IF (reset_scn <> last_reset_scn or reset_time <> last_reset_time) THEN BEGIN deb('checkLogHistory - new last_dbinc_key'); deb('checkLogHistory - for reset_time ' || checkLogHistory.reset_time || ' reset_scn ' || checkLogHistory.reset_scn || ' this_db_key ' || this_db_key); select dbinc_key into last_dbinc_key from dbinc where reset_time = checkLogHistory.reset_time and reset_scn = checkLogHistory.reset_scn and db_key = this_db_key; last_reset_scn := reset_scn; last_reset_time := reset_time; EXCEPTION WHEN others THEN raise_application_error(-29999, 'Unknown Incarnation'); END; END IF; END IF;
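-- (last_dbinc_key, last_reset_scn and last_reset_time act as a one-entry
-- cache: consecutive log history records usually belong to the same
-- incarnation, so the dbinc lookup above runs only when the resetlogs
-- data changes from one record to the next.)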
deb('checkLogHistory - last_dbinc_key='||last_dbinc_key|| ' reset_scn '||reset_scn || ' reset_time '||reset_time); BEGIN INSERT INTO rlh( rlh_key, dbinc_key, rlh_recid, rlh_stamp, thread#, sequence#, low_scn, low_time, next_scn) VALUES( rman_seq.nextval, last_dbinc_key, rlh_recid, rlh_stamp, thread#, sequence#, low_scn, low_time, next_scn); EXCEPTION WHEN dup_val_on_index THEN -- We already have a rlh for the same incarnation with same thread#, -- sequence# and low_scn RETURN; END; END checkLogHistory; PROCEDURE endLogHistoryResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN -- set the high_rlh_recid for the next resync UPDATE node SET high_rlh_recid = last_rlh_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_rlh_recid := last_rlh_recid; last_rlh_recid := NULL; END endLogHistoryResync; /*-------------------------* * Archived Log resync * *-------------------------*/ FUNCTION beginArchivedLogResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_al_recid INTO last_al_recid FROM node WHERE site_key = this_site_key; ELSE last_al_recid := sessionWaterMarks.high_al_recid; END IF; RETURN last_al_recid; END beginArchivedLogResync; PROCEDURE deleteDuplicateAL(recid IN NUMBER, stamp IN NUMBER, fname in VARCHAR2) IS lfname al.fname%TYPE; BEGIN lfname := fname; IF lfname is null THEN BEGIN SELECT fname INTO lfname from AL WHERE al_recid = recid AND al_stamp = stamp AND al.dbinc_key in (select dbinc_key from dbinc where db_key = this_db_key); EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique key is dbinc_key, al_recid, al_stamp RETURN; END; END IF; -- Mark any previous archived logs with the same name except the one -- with given recid/stamp as deleted. DELETE al WHERE al.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND al.fname = lfname AND ((nvl(al.site_key, this_site_key) = this_site_key) OR (logs_shared = TRUE#)) AND al.fname_hashkey = substr(lfname,1,10)||substr(lfname,-10) AND NOT (al.al_recid = recid AND al.al_stamp = stamp ); END deleteDuplicateAL; PROCEDURE checkArchivedLog( al_recid IN NUMBER ,al_stamp IN NUMBER ,thread# IN NUMBER ,sequence# IN NUMBER ,reset_scn IN NUMBER ,reset_time IN DATE ,low_scn IN NUMBER ,low_time IN DATE ,next_scn IN NUMBER ,next_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,fname IN VARCHAR2 ,archived IN VARCHAR2 ,completion_time IN DATE ,status IN VARCHAR2 ,is_standby IN VARCHAR2 ,dictionary_begin IN VARCHAR2 default NULL ,dictionary_end IN VARCHAR2 default NULL ,is_recovery_dest_file IN VARCHAR2 default 'NO' ,compressed IN VARCHAR2 default 'NO' ,creator IN VARCHAR2 default NULL ,terminal IN VARCHAR2 default 'NO' ) IS local al%rowtype; my_dbinc_key NUMBER; BEGIN IF (last_al_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (al_recid < last_al_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (al_recid > last_al_recid + 1) THEN -- there is gap in archived log -- not sure what we should do here NULL; END IF; last_al_recid := al_recid; IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; IF (al_stamp < kccdivts) THEN deb('checkArchivedLog - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- see if this log is a log that was cleared by a resetlogs. 
if so, -- skip it. IF (sequence# = 0) THEN RETURN; END IF; -- find the database incarnation of this archived log -- calling the procedure is expensive, we can optimize as in case of resyncs -- for offr and rlh. This optimization can be done within checkIncarnation. my_dbinc_key := checkIncarnation(reset_scn, reset_time); BEGIN IF (status = 'D') THEN -- Do not bother to insert this record. NULL; ELSE INSERT INTO al (al_key, dbinc_key, al_recid, al_stamp, thread#, sequence#, low_scn, low_time, next_scn, next_time, fname, fname_hashkey, archived, blocks, block_size, completion_time, status, is_standby, dictionary_begin, dictionary_end, is_recovery_dest_file, compressed, creator, terminal, site_key) VALUES (rman_seq.nextval, my_dbinc_key, al_recid, al_stamp, thread#, sequence#, low_scn, low_time, next_scn, next_time, fname, substr(fname,1,10)||substr(fname, -10), archived, blocks, checkArchivedLog.block_size, completion_time, status, is_standby, dictionary_begin, dictionary_end, is_recovery_dest_file, compressed, creator, terminal, this_site_key); deleteDuplicateAL(al_recid, al_stamp, fname); END IF; -- Note that also cleared entries are inserted into rcvcat. -- Update log history entry. If fname is null and it is not archived -- then the log was cleared. Otherwise, this could as well be a deleted -- archived log file. IF checkArchivedLog.archived = 'N' then UPDATE rlh SET status = decode(fname, NULL, 'C', status) WHERE rlh.dbinc_key = my_dbinc_key AND rlh.thread# = checkArchivedLog.thread# AND rlh.sequence# = checkArchivedLog.sequence# AND rlh.low_scn = checkArchivedLog.low_scn; END IF; EXCEPTION WHEN dup_val_on_index THEN deb('checkArchivedLog - Inside dup_val_on_index exception'); -- this archived log already exist in rcvcat. Get the existing archived -- record to validate it. SELECT * INTO local FROM al WHERE al.dbinc_key = my_dbinc_key AND (al.is_standby = checkArchivedLog.is_standby OR (al.is_standby is NULL AND checkArchivedLog.is_standby is NULL)) AND al.al_recid = checkArchivedLog.al_recid AND al.al_stamp = checkArchivedLog.al_stamp; -- change stamp and resync record, if client is site aware... 
IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check that fname matches IF (fname <> local.fname) THEN deb('checkArchivedLog - input fname ['||fname||']; local.fname ['|| local.fname || ']'); raise_application_error(-20080, 'Invalid archived log name'); END IF; END; END checkArchivedLog; PROCEDURE endArchivedLogResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_al_recid = last_al_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_al_recid := last_al_recid; last_al_recid := NULL; END endArchivedLogResync; /*-------------------------* * Offline range resync * *-------------------------*/ FUNCTION beginOfflineRangeResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_offr_recid INTO last_offr_recid FROM node WHERE site_key = this_site_key; ELSE last_offr_recid := sessionWaterMarks.high_offr_recid; END IF; RETURN last_offr_recid; END beginOfflineRangeResync; PROCEDURE checkOfflineRange( offr_recid IN NUMBER ,offr_stamp IN NUMBER ,file# IN NUMBER ,create_scn IN NUMBER ,offline_scn IN NUMBER ,online_scn IN NUMBER ,online_time IN DATE ,cf_create_time IN DATE ,reset_scn IN number default NULL ,reset_time IN date default NULL ) IS local offr%rowtype; -- Banand Jan-01-2006 - Rewrote completly routine, after dropping constraint -- offr_u1(dbinc_key, offr_recid, offr_stamp) BEGIN IF (last_offr_recid IS NULL AND offr_recid IS NOT NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; deb('Checkofflinerange - '|| ' recid: '|| nvl(to_char(offr_recid), 'NULL')|| ' stamp: '|| nvl(to_char(offr_stamp), 'NULL')|| ' file#: '|| file#|| ' create_scn: '|| nvl(to_char(create_scn), 'NULL')|| ' offline_scn: '|| offline_scn || ' online_scn: '|| online_scn|| ' online_time: '|| online_time|| ' cf_create_time: '|| cf_create_time|| ' reset_scn:'|| nvl(reset_scn, -1)); last_offr_recid := offr_recid; IF (last_dbinc_key is NULL OR reset_scn IS NULL) THEN deb('checkOfflineRange - Init dbinc_key: '||this_dbinc_key); last_dbinc_key := this_dbinc_key; SELECT reset_scn, reset_time INTO last_reset_scn, last_reset_time FROM dbinc WHERE dbinc_key = this_dbinc_key; END IF; IF (reset_scn IS NOT NULL and reset_time IS NOT NULL) THEN IF (reset_scn <> last_reset_scn or reset_time <> last_reset_time) THEN BEGIN deb('checkOfflineRange - new incarnation detected'|| ' reset_scn: '|| reset_scn|| ' last_reset_scn: '|| last_reset_scn); SELECT dbinc_key INTO last_dbinc_key FROM dbinc WHERE reset_time = checkOfflineRange.reset_time AND reset_scn = checkOfflineRange.reset_scn AND db_key = this_db_key; last_reset_scn := reset_scn; last_reset_time := reset_time; EXCEPTION WHEN others THEN raise_application_error(-20070, 'Unknown Incarnation'); END; END IF; END IF; deb('checkOfflineRange - dbinc_key is: '||last_dbinc_key); deb('checkOfflineRange - Looking if offline range record already '|| 'exists in OFFR'); BEGIN -- We must get either one row or no rows, otherwise it is error... 
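      -- In other words, (dbinc_key, file#, create_scn, offline_scn) is
      -- treated here as the natural key of an offline range; a sketch of
      -- the expected outcomes (hypothetical data):
      --   no row    -> fall through to the INSERT below
      --   one row   -> compare online_scn/online_time, only debug on mismatch
      --   many rows -> propagate the too_many_rows error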
SELECT distinct file#, create_scn, offline_scn, online_scn, online_time INTO local.file#, local.create_scn, local.offline_scn, local.online_scn, local.online_time FROM offr WHERE dbinc_key = last_dbinc_key AND file# = checkOfflineRange.file# AND create_scn = checkOfflineRange.create_scn AND offline_scn = checkOfflineRange.offline_scn; IF local.online_scn <> checkOfflineRange.online_scn THEN deb('checkOfflineRange - Online_scn OK?'|| ' online_scn: ' || online_scn || ' local.online_scn: ' || local.online_scn); -- raise_application_error(-20087, 'Invalid online SCN'); END IF; IF local.online_time <> checkOfflineRange.online_time THEN deb('checkOfflineRange - Online_time OK?'|| ' online_time: ' || online_time || ' local.online_time: ' || local.online_time); -- raise_application_error(-20089, 'Invalid online time'); END IF; EXCEPTION WHEN no_data_found THEN NULL; -- offline range record not yet known to catalog, go to insert WHEN too_many_rows THEN RAISE; -- there must not be more than one offline range with the same -- dbinc_key, file#, create_scn, and offline_scn WHEN others THEN RAISE; END; BEGIN INSERT INTO offr(offr_key, dbinc_key, offr_recid, offr_stamp, file#, create_scn, offline_scn, online_scn, online_time, cf_create_time) VALUES(rman_seq.nextval, last_dbinc_key, offr_recid, nvl(offr_stamp,0), file#, create_scn, offline_scn, online_scn, online_time, cf_create_time); incResyncActions(RESYNC_ACTION_CHANGE, file#, to_char(NULL)); deb('checkOfflineRange - Successfully inserted new OFFR.'); EXCEPTION WHEN dup_val_on_index THEN deb('checkOfflineRange - record already exists'); IF offr_recid > 0 AND offr_stamp > 0 THEN deb('checkOfflineRange - update new offr_recid, offr_stamp, '|| 'online_scn and online_time'); UPDATE OFFR SET offr_recid = checkOfflineRange.offr_recid, offr_stamp = checkOfflineRange.offr_stamp, online_scn = checkOfflineRange.online_scn, online_time = checkOfflineRange.online_time WHERE dbinc_key = last_dbinc_key AND file# = checkOfflineRange.file# AND create_scn = checkOfflineRange.create_scn AND offline_scn = checkOfflineRange.offline_scn AND cf_create_time = checkOfflineRange.cf_create_time; incResyncActions(RESYNC_ACTION_CHANGE, file#, to_char(NULL)); END IF; END; deb('checkOfflineRange - exiting'); END; PROCEDURE endOfflineRangeResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_offr_recid = last_offr_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_offr_recid := last_offr_recid; last_offr_recid := NULL; END endOfflineRangeResync; /*-------------------------* * Backup Set resync * *-------------------------*/ -- updateBackupSetRec calculates and updates the status of a backup set -- based on its backup pieces. The backup set is 'A' (available) if there -- is an available copy of all of its pieces on one device type. The -- backup set is 'D' (deleted) if it has no backup pieces (either all pieces -- are deleted, or this was a backup validate). Otherwise it is marked 'O' -- (other). -- Here we want to look at all backup pieces, not just those accessible at -- this site, before updating the backup set status. This has to be done -- because a backup set's pieces can belong to different sites, and pieces -- that are not accessible at this site may be accessible at some other site.
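-- A worked example of the status calculation (hypothetical backup set with
-- pieces = 3): if DISK holds available copies of pieces 1 and 2 only, while
-- SBT_TAPE holds available copies of pieces 1, 2 and 3, then
--   max(count(DISTINCT piece#)) ... GROUP BY device_type = max(2, 3) = 3
-- which equals total_pieces, so the status becomes 'A'. If the SBT_TAPE copy
-- of piece 3 is then deleted, no single device type has all pieces, and the
-- set is marked 'O' as long as some non-deleted piece rows remain.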
PROCEDURE updateBackupSetRec(bs_key IN NUMBER) IS total_pieces NUMBER; backup_validate VARCHAR2(3); available_pieces NUMBER; new_status VARCHAR2(1); bskeep NUMBER; bstype VARCHAR2(1); low NUMBER := NULL; high NUMBER := NULL; bs_site_key NUMBER := NULL; pieces_on_msite NUMBER; new_site_key NUMBER; BEGIN SELECT pieces,input_file_scan_only, keep_options, bck_type, site_key INTO total_pieces,backup_validate, bskeep, bstype, bs_site_key FROM bs WHERE bs.bs_key = updateBackupSetRec.bs_key; IF nvl(backup_validate,'NO') <> 'YES' THEN SELECT max(count(DISTINCT piece#)) INTO available_pieces FROM bp WHERE bp.bs_key = updateBackupSetRec.bs_key AND bp.status = 'A' GROUP BY device_type; END IF; -- if not all pieces are on one site, set site_key to null IF bs_site_key IS NULL OR bs_site_key <> this_site_key THEN SELECT count(distinct nvl(site_key, 0)) INTO pieces_on_msite FROM bp WHERE bs_key = updateBackupSetRec.bs_key; IF pieces_on_msite = 1 THEN SELECT distinct site_key INTO new_site_key FROM BP WHERE bs_key = updateBackupSetRec.bs_key; END IF; -- update site_key in BS to new_site_key or null UPDATE bs SET site_key = new_site_key WHERE bs.bs_key = updateBackupSetRec.bs_key; END IF; IF (total_pieces = 0 or backup_validate = 'YES') THEN -- Bug 1467871: remove dummy records inserted by 8.1.6 and earlier -- versions of RMAN new_status := 'D'; ELSIF (available_pieces = total_pieces) THEN new_status := 'A'; ELSE BEGIN -- set new_status to 'O' (other) if some non-deleted rows are found SELECT 'O' INTO new_status FROM bp WHERE bp.bs_key = updateBackupSetRec.bs_key AND bp.status != 'D' AND rownum < 2; EXCEPTION WHEN no_data_found THEN new_status := 'D'; -- all pieces are deleted or not there END; END IF; IF new_status in ('O', 'A') OR backup_validate = 'YES' THEN UPDATE bs SET status = new_status WHERE bs.bs_key = updateBackupSetRec.bs_key; ELSE -- Manage the LONG_TERM flag on restore points that might be useful for this IF (bskeep > 0 and bstype = 'L') THEN SELECT min(low_scn), max(next_scn) INTO low, high FROM brl WHERE bs_key = updateBackupSetRec.bs_key; END IF; IF (bskeep > 0 and bstype = 'D') THEN SELECT min(ckp_scn) INTO low FROM bdf WHERE bs_key = updateBackupSetRec.bs_key; END IF; -- Note that the records in the backup set will automatically be deleted -- because the referential integrity constraints use ON DELETE CASCADE DELETE FROM bs WHERE bs.bs_key = updateBackupSetRec.bs_key; -- Alter the restore point table to reflect the deleted backup set IF (low IS NOT NULL) THEN updateRestorePoint(low, high); END IF; END IF; END updateBackupSetRec; FUNCTION beginBackupSetResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_bs_recid INTO last_bs_recid FROM node WHERE site_key = this_site_key; ELSE last_bs_recid := sessionWaterMarks.high_bs_recid; END IF; RETURN last_bs_recid; END beginBackupSetResync; PROCEDURE checkBackupSet( bs_recid IN NUMBER ,bs_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,bck_type IN VARCHAR2 ,incr_level IN NUMBER DEFAULT NULL ,pieces IN NUMBER ,start_time IN DATE ,completion_time IN DATE ,controlfile_included IN VARCHAR2 DEFAULT NULL ,input_file_scan_only IN VARCHAR2 DEFAULT NULL ,keep_options IN NUMBER DEFAULT 0 ,keep_until IN DATE DEFAULT NULL ,block_size IN NUMBER DEFAULT NULL ,multi_section IN VARCHAR2 DEFAULT NULL ) IS local bs%rowtype; newbskey number; BEGIN IF (last_bs_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (bs_recid < last_bs_recid) THEN
raise_application_error(-20036, 'Invalid record order'); END IF; IF (bs_recid > last_bs_recid + 1) THEN -- there is gap in backup set records NULL; END IF; last_bs_recid := bs_recid; IF (bs_stamp < kccdivts) THEN deb('checkBackupSet - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; IF (bck_type NOT IN ('D','I','L') OR bck_type IS NULL) THEN raise_application_error(-20090, 'Invalid backup set type'); END IF; IF (incr_level NOT IN (0,1,2,3,4) OR (bck_type NOT IN ('D','I') AND incr_level <> 0)) THEN raise_application_error(-20091, 'Invalid backup set level'); END IF; BEGIN select rman_seq.nextval into newbskey from dual; -- insert the backup set with status 'D'. Backup piece resync will mark -- the backup set available if all pieces are found. INSERT INTO bs (bs_key, db_key, bs_recid, bs_stamp, set_stamp, set_count, bck_type, incr_level, pieces, start_time, completion_time, status, controlfile_included, input_file_scan_only, keep_options, keep_until, block_size, site_key, multi_section) VALUES (newbskey, this_db_key, bs_recid, bs_stamp, set_stamp, set_count, bck_type, incr_level, pieces, start_time, completion_time, 'D', decode(controlfile_included, 'SBY','STANDBY','YES','BACKUP','NONE'), input_file_scan_only, keep_options, keep_until, block_size, this_site_key, decode(multi_section,'YES','Y',null)); cntbs := cntbs + 1; updatebs(cntbs) := newbskey; EXCEPTION WHEN dup_val_on_index THEN deb('checkBackupSet - Inside dup_val_on_index exception'); -- backup set is already in rcvcat SELECT * INTO local FROM bs WHERE bs.db_key = this_db_key AND bs.set_stamp = checkBackupSet.set_stamp AND bs.set_count = checkBackupSet.set_count; -- Total pieces is a simple guess during backuppiece inspection. -- Update total pieces if this guess is greater than previous one. -- IF (pieces > local.pieces) THEN UPDATE bs SET bs.pieces = checkBackupSet.pieces WHERE bs.db_key = this_db_key AND bs.bs_key = local.bs_key; -- validate this backupset during sanityCheck cntbs:= cntbs + 1; updatebs(cntbs) := local.bs_key; END IF; -- Detect here if the backup set contains same signature or different -- If different throw raise_application_error -- start_time is based on set_stamp, hence the old condition is bogus... 
-- TODO -- If site_key is null, update with this_site_key IF local.site_key IS NULL THEN UPDATE bs SET site_key = this_site_key WHERE bs.db_key = this_db_key AND bs.bs_key = local.bs_key; END IF; END; END checkBackupSet; PROCEDURE endBackupSetResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN -- update high_bs_recid for the next resync UPDATE node SET high_bs_recid = last_bs_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bs_recid := last_bs_recid; last_bs_recid := NULL; END endBackupSetResync; /*-------------------------* * Backup piece resync * *-------------------------*/ FUNCTION beginBackupPieceResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_bp_recid INTO last_bp_recid FROM node WHERE site_key = this_site_key; ELSE last_bp_recid := sessionWaterMarks.high_bp_recid; END IF; RETURN last_bp_recid; END beginBackupPieceResync; PROCEDURE deleteDuplicateBP(recid IN NUMBER, stamp IN NUMBER, bs_key IN NUMBER, device_type IN VARCHAR2, handle IN VARCHAR2) IS ldevice_type bp.device_type%TYPE; lhandle bp.handle%TYPE; BEGIN ldevice_type := device_type; lhandle := handle; IF ldevice_type IS NULL OR lhandle IS NULL THEN BEGIN SELECT device_type, handle INTO ldevice_type, lhandle FROM BP WHERE bp.db_key = this_db_key AND bp_recid = recid AND bp_stamp = stamp AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))) AND deleteDuplicateBP.bs_key = bp.bs_key; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique key is bs_key, recid, stamp RETURN; END; END IF; -- If there is a piece with the same device_type and handle, we assume that -- it was written over by the current piece. Delete the matching backup -- pieces in a loop, so that the status of each affected backup set is -- updated too.
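  -- Illustrative scenario (hypothetical handle and recid/stamp values): a new
  -- piece is resynced with handle '/backup/piece_1.bkp' and recid/stamp
  -- (120, 700001). An older catalog row with the same device_type and handle
  -- but recid/stamp (95, 650000) is assumed to have been overwritten on
  -- media, so the loop below deletes that row and then recomputes the status
  -- of its backup set via updateBackupSetRec.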
FOR bprec IN bpq(ldevice_type, lhandle, recid, stamp) LOOP DELETE bp WHERE bp.bp_key = bprec.bp_key; updateBackupSetRec(bprec.bs_key); -- update the backupset status END LOOP; END deleteDuplicateBP; PROCEDURE checkBackupPiece( bp_recid IN NUMBER ,bp_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,piece# IN NUMBER ,tag IN VARCHAR2 ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,concur IN VARCHAR2 ,start_time IN DATE ,completion_time IN DATE ,status IN VARCHAR2 ,copy# IN NUMBER default 1 ,media_pool IN NUMBER default 0 ,bytes IN NUMBER default NULL ,is_recovery_dest_file IN VARCHAR2 default 'NO' ,rsr_recid IN NUMBER default NULL ,rsr_stamp IN NUMBER default NULL ,compressed IN VARCHAR2 default 'NO' ,encrypted IN VARCHAR2 default 'NO' ,backed_by_osb IN VARCHAR2 default 'NO' ) IS localbs bs%rowtype; localbp bp%rowtype; localrsr rsr%rowtype; BEGIN IF (last_bp_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (bp_recid < last_bp_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (bp_recid > last_bp_recid + 1) THEN -- there is a gap in the backup piece records -- not sure what we should do here NULL; END IF; last_bp_recid := bp_recid; IF (bp_stamp < kccdivts) THEN deb('checkBackupPiece - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- bug 9576536 IF handle IS NULL AND status != 'D' THEN deb('checkBackupPiece - handle is null, ignore this row'); RETURN; END IF; -- IF (status = 'D') THEN -- RETURN; -- END IF; -- find the key, recid, and # of pieces for the backup set BEGIN SELECT * into localbs from bs WHERE bs.db_key = this_db_key AND bs.set_stamp = checkBackupPiece.set_stamp AND bs.set_count = checkBackupPiece.set_count; EXCEPTION WHEN no_data_found THEN IF status != 'D' THEN select rman_seq.nextval into localbs.bs_key from dual; INSERT INTO bs (bs_key, db_key, bs_recid, bs_stamp, set_stamp, set_count, bck_type, incr_level, pieces, start_time, completion_time, status, controlfile_included, site_key, multi_section) VALUES -- Since we do not know the bs_recid/bs_stamp of the bs record, just -- use 0 for bs_recid and rman_seq.nextval for bs_stamp. This pair -- is not a naturally occurring bs_recid/bs_stamp combination, so -- it will serve to indicate how these records got inserted. -- There is no unique constraint on these columns, so using -- a constant here is OK. The reason we do not use NULL is because -- old RMAN versions do not use a null indicator when they select -- this column. (localbs.bs_key, this_db_key, 0, rman_seq.nextval, checkBackupPiece.set_stamp, checkBackupPiece.set_count, NULL, NULL, checkBackupPiece.piece#, checkBackupPiece.start_time, checkBackupPiece.completion_time, 'O', 'NONE', this_site_key, NULL); cntbs := cntbs + 1; updatebs(cntbs) := localbs.bs_key; ELSE -- no backupset records available RETURN; END IF; END; -- Backup set record statuses are updated at the end of resync during -- sanityCheck, so there is no need to go beyond this point if status is 'D'.
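  -- Sketch of the deferred-status bookkeeping used below (names are from
  -- this package): instead of recomputing the backup set status here for a
  -- deleted piece, the bs_key is appended to the updatebs() table and
  -- sanityCheck resolves all queued backup sets once, at the end of the
  -- resync:
  --
  --   cntbs := cntbs + 1;
  --   updatebs(cntbs) := localbs.bs_key;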
IF (status = 'D') THEN cntbs:= cntbs + 1; updatebs(cntbs) := localbs.bs_key; RETURN; END IF; -- got inserted by no_data_found exception IF (localbs.bs_recid is null OR localbs.bs_recid = 0) AND checkBackupPiece.piece# > localbs.pieces THEN -- update those bs records created in the above no_data_found exception -- everytime checkBackupPiece is called UPDATE bs SET bs.pieces = checkBackupPiece.piece# WHERE bs.bs_key = localbs.bs_key AND bs.bck_type IS NULL; END IF; -- Find the owning rsr row and get its key. BEGIN SELECT rsr_key INTO localrsr.rsr_key FROM rsr WHERE rsr.dbinc_key = this_dbinc_key AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is NULL) AND rsr.rsr_stamp = checkBackupPiece.rsr_stamp AND rsr.rsr_recid = checkBackupPiece.rsr_recid; EXCEPTION WHEN no_data_found THEN -- no rsr record avaiable - ignore NULL; END; BEGIN INSERT INTO bp (bp_key, bs_key, piece#, db_key, bp_recid, bp_stamp, tag, device_type, copy#, handle, handle_hashkey, comments, media, media_pool, concur, start_time, completion_time, status, bytes, is_recovery_dest_file, rsr_key, compressed, site_key, encrypted, backed_by_osb) VALUES (rman_seq.nextval, localbs.bs_key, piece#, this_db_key, bp_recid, bp_stamp, tag, device_type, copy#, handle, substr(device_type,1,10)||substr(handle,1,10)||substr(handle,-10), comments, media, media_pool, decode(concur,'YES','Y','NO','N'), start_time, completion_time, status, bytes, is_recovery_dest_file, localrsr.rsr_key, compressed, this_site_key, decode(encrypted, 'YES', 'Y', 'N'), decode(backed_by_osb, 'YES', 'Y', 'N')); deleteDuplicateBP(bp_recid, bp_stamp, localbs.bs_key, device_type, handle); -- validate the backup set. updateBackupSetRec(localbs.bs_key); EXCEPTION WHEN dup_val_on_index THEN deb('checkBackupPiece - Inside dup_val_on_index exception'); -- check if the backup piece record is already in rcvcat -- must get one record ... SELECT * INTO localbp FROM bp WHERE bp.bs_key = localbs.bs_key AND bp.bp_recid = checkBackupPiece.bp_recid AND bp.bp_stamp = checkBackupPiece.bp_stamp; -- change stamp and resync record, if client is site aware... 
IF client_site_aware AND this_site_key <> localbp.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check the piece# IF (piece# <> localbp.piece#) THEN raise_application_error(-20093, 'Invalid piece#'); END IF; -- If site_key is null, update with this_site_key IF localbp.site_key IS NULL THEN UPDATE bp SET site_key = this_site_key WHERE bp.bs_key = localbs.bs_key AND bp.bp_recid = checkBackupPiece.bp_recid AND bp.bp_stamp = checkBackupPiece.bp_stamp; END IF; END; END checkBackupPiece; PROCEDURE endBackupPieceResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_bp_recid = last_bp_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bp_recid := last_bp_recid; last_bp_recid := NULL; END endBackupPieceResync; /*-------------------------* * Backup Datafile resync * *-------------------------*/ PROCEDURE addBackupControlFile( bs_key IN NUMBER ,bcf_recid IN NUMBER ,bcf_stamp IN NUMBER ,dbinc_key IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,create_time IN DATE ,min_offr_recid IN NUMBER ,blocks IN NUMBER ,block_size IN NUMBER ,controlfile_type IN VARCHAR2 ,cfile_abck_year IN number ,cfile_abck_mon_day IN number ,cfile_abck_seq IN number ) IS local bcf%rowtype; BEGIN BEGIN INSERT INTO bcf(bcf_key, bs_key, dbinc_key, bcf_recid, bcf_stamp, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, controlfile_type, blocks, autobackup_date, autobackup_sequence) VALUES (rman_seq.nextval, bs_key, dbinc_key, bcf_recid, bcf_stamp, ckp_scn, ckp_time, create_time, min_offr_recid,block_size, controlfile_type, blocks, decode(cfile_abck_year, 0, to_date(NULL), to_date(to_char(cfile_abck_year)|| lpad(to_char(cfile_abck_mon_day), 4, '0'), 'YYYYMMDD', 'NLS_CALENDAR=Gregorian')), cfile_abck_seq); EXCEPTION WHEN dup_val_on_index THEN deb('addBackupControlfile - Inside dup_val_on_index exception'); -- this backup controlfile record already exists in rcvcat, must find -- record. 
SELECT ckp_scn, ckp_time, bcf_recid, bcf_stamp INTO local.ckp_scn, local.ckp_time, local.bcf_recid, local.bcf_stamp FROM bcf WHERE bcf.bs_key = addBackupControlFile.bs_key; -- check the ckp_scn and ckp_time IF (ckp_scn <> local.ckp_scn or ckp_time <> local.ckp_time) THEN deb('addBackupControlfile - ckp_scn '||ckp_scn||' ckp_time '|| to_char(ckp_time)); deb('addBackupControlfile - lckp_scn '||local.ckp_scn||' lckp_time '|| to_char(local.ckp_time)); raise_application_error(-20095, 'Invalid ckp_scn or ckp_time'); END IF; -- fix recid/stamp if they do not match IF local.bcf_recid <> bcf_recid or local.bcf_stamp <> bcf_stamp THEN UPDATE bcf set bcf_recid = addBackupControlFile.bcf_recid, bcf_stamp = addBackupControlFile.bcf_stamp WHERE bcf.bs_key = addBackupControlFile.bs_key; END IF; END; END addBackupControlFile; PROCEDURE addBackupDataFile( bs_key IN NUMBER ,bdf_recid IN NUMBER ,bdf_stamp IN NUMBER ,file# IN NUMBER ,create_scn IN NUMBER ,dbinc_key IN NUMBER ,incr_level IN NUMBER ,incr_scn IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,abs_fuzzy_scn IN NUMBER ,datafile_blocks IN NUMBER ,blocks IN NUMBER ,block_size IN NUMBER ,completion_time IN DATE ,blocks_read IN NUMBER ,create_time IN DATE ,marked_corrupt IN NUMBER ,used_chg_track IN VARCHAR2 ,used_optim IN VARCHAR2 ,foreign_dbid IN number ,plugged_readonly IN varchar2 ,plugin_scn IN number ,plugin_reset_scn IN number ,plugin_reset_time IN date ,section_size IN number ) IS local bdf%rowtype; BEGIN BEGIN INSERT INTO bdf(bdf_key, dbinc_key, bdf_recid, bdf_stamp, bs_key, file#, create_scn, incr_level, incr_scn, ckp_scn, ckp_time, abs_fuzzy_scn, datafile_blocks, blocks, block_size, completion_time, blocks_read, create_time, marked_corrupt, used_chg_track, used_optim, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, section_size) VALUES (rman_seq.nextval, dbinc_key, bdf_recid, bdf_stamp, bs_key, file#, create_scn, incr_level, incr_scn, ckp_scn,ckp_time, abs_fuzzy_scn, datafile_blocks, blocks, block_size, completion_time, nvl(blocks_read, datafile_blocks), create_time, marked_corrupt, decode(used_chg_track, 'YES', 'Y', 'N'), decode(used_optim, 'YES', 'Y', 'N'), foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, section_size); EXCEPTION WHEN dup_val_on_index THEN deb('addBackupDatafile - Inside dup_val_on_index exception'); -- this backup datafile record already exist in rcvcat -- must exist a record with same create_scn SELECT dbinc_key, create_scn, bdf_recid, bdf_stamp, plugin_scn INTO local.dbinc_key, local.create_scn,local.bdf_recid, local.bdf_stamp, local.plugin_scn FROM bdf WHERE bdf.bs_key = addBackupDataFile.bs_key AND bdf.file# = addBackupDataFile.file#; -- check the dbinc_key and creation scn IF (dbinc_key <> local.dbinc_key) THEN raise_application_error(-20096, 'Invalid dbinc_key'); END IF; IF (create_scn <> local.create_scn AND plugin_scn <> local.plugin_scn) THEN raise_application_error(-20097, 'Invalid create scn'); END IF; -- fix recid/stamp if they do not match IF bdf_recid <> local.bdf_recid or bdf_stamp <> local.bdf_stamp THEN UPDATE bdf set bdf_recid = addBackupDataFile.bdf_recid, bdf_stamp = addBackupDataFile.bdf_stamp WHERE bdf.bs_key = addBackupDataFile.bs_key; END IF; END; END addBackupDataFile; FUNCTION beginBackupDataFileResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_bdf_recid INTO last_bdf_recid FROM node WHERE site_key = this_site_key; ELSE 
last_bdf_recid := sessionWaterMarks.high_bdf_recid; END IF; RETURN last_bdf_recid; END beginBackupDataFileResync; PROCEDURE checkBackupDataFile( bdf_recid IN NUMBER ,bdf_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,file# IN NUMBER ,create_scn IN NUMBER ,create_time IN DATE ,reset_scn IN NUMBER ,reset_time IN DATE ,incr_level IN NUMBER ,incr_scn IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,abs_fuzzy_scn IN NUMBER ,datafile_blocks IN NUMBER ,blocks IN NUMBER ,block_size IN NUMBER ,min_offr_recid IN NUMBER ,completion_time IN DATE ,controlfile_type IN VARCHAR2 DEFAULT NULL ,cfile_abck_year IN NUMBER DEFAULT NULL -- contains marked_corrupt for datafiles ,cfile_abck_mon_day IN NUMBER DEFAULT NULL -- contains media_corrupt for datafiles ,cfile_abck_seq IN NUMBER DEFAULT NULL -- contains logical_corrupt for datafiles ,chk_last_recid IN BOOLEAN DEFAULT TRUE ,blocks_read IN NUMBER DEFAULT NULL ,used_chg_track IN VARCHAR2 DEFAULT 'NO' ,used_optim IN VARCHAR2 DEFAULT 'NO' ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ,section_size IN number DEFAULT NULL ) IS bs_key NUMBER; dbinc_key NUMBER; BEGIN -- chk_last_recid is FALSE during bdfbp resync (see recover.txt) IF chk_last_recid THEN IF (last_bdf_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (bdf_recid < last_bdf_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (bdf_recid > last_bdf_recid + 1) THEN -- there is gap in backup set records NULL; END IF; last_bdf_recid := bdf_recid; END IF; IF (bdf_stamp < kccdivts) THEN deb('In checkBackupDatafile, ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- Write a seperate function to return backup set key by taking -- set_stamp and set_count... take into account the node specific info -- in that function. TODO -- find the key of the backup set BEGIN SELECT bs_key INTO bs_key FROM bs WHERE bs.db_key = this_db_key AND bs.set_stamp = checkBackupDataFile.set_stamp AND bs.set_count = checkBackupDataFile.set_count; EXCEPTION WHEN no_data_found THEN -- Bug 1467871: bs_key should be inserted either in -- checkBackupSet (or) checkBackupPiece -- Exception would occur only when backupset records and -- backuppiece records ages out. Ignore silently these -- orphaned backup datafile records rather than inserting a 'D' -- record in BS table which will create dummy records with -- 8.1.6- version of RMAN and this catalog version. 
return; END; BEGIN -- update only those bs records created in no_data_found exception -- of checkBackupPiece, checkBackupDataFile and checkBackupRedoLog IF (checkBackupDatafile.incr_level > 0) THEN UPDATE bs SET bs.incr_level = checkBackupDataFile.incr_level, bs.bck_type = 'I' WHERE bs.bs_key = checkBackupDataFile.bs_key AND bs.bck_type IS NULL; ELSE UPDATE bs SET bs.incr_level = checkBackupDataFile.incr_level, bs.bck_type = 'D' WHERE bs.bs_key = checkBackupDataFile.bs_key AND bs.bck_type IS NULL; END IF; IF (file# = 0 and controlfile_type is not null) then UPDATE bs SET bs.controlfile_included= decode(checkBackupDatafile.controlfile_type,'B','BACKUP', 'S','STANDBY', 'NONE') WHERE bs.bs_key = checkBackupDataFile.bs_key AND bs.controlfile_included = 'NONE'; END IF; END; -- find the database incarnation key dbinc_key := checkIncarnation(reset_scn, reset_time); IF (file# = 0) THEN addBackupControlFile(bs_key, bdf_recid, bdf_stamp, dbinc_key, ckp_scn, ckp_time, create_time, min_offr_recid, blocks, block_size, controlfile_type, cfile_abck_year, cfile_abck_mon_day, cfile_abck_seq); ELSE addBackupDataFile(bs_key, bdf_recid, bdf_stamp, file#, create_scn, dbinc_key, incr_level, incr_scn, ckp_scn, ckp_time, abs_fuzzy_scn, datafile_blocks, blocks, block_size, completion_time, blocks_read, create_time, cfile_abck_year, used_chg_track, used_optim, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time, section_size); END IF; END checkBackupDataFile; PROCEDURE endBackupDataFileResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_bdf_recid = last_bdf_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bdf_recid := last_bdf_recid; last_bdf_recid := NULL; END endBackupDataFileResync; /*-----------------------* * Backup SPFILE resync * *-----------------------*/ FUNCTION beginBackupSpFileResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_bsf_recid INTO last_bsf_recid FROM node WHERE site_key = this_site_key; ELSE last_bsf_recid := sessionWaterMarks.high_bsf_recid; END IF; RETURN last_bsf_recid; END beginBackupSpFileResync; PROCEDURE addBackupSpFile( bs_key IN NUMBER ,bsf_recid IN NUMBER ,bsf_stamp IN NUMBER ,modification_time IN DATE ,bytes IN NUMBER ,db_unique_name IN VARCHAR2 ) IS local bsf%rowtype; BEGIN deb('addBackupSpfile'); INSERT INTO bsf(bsf_key, bs_key, db_key, bsf_recid, bsf_stamp, modification_time, bytes, db_unique_name) VALUES (rman_seq.nextval, bs_key, this_db_key, bsf_recid, bsf_stamp, modification_time, bytes, db_unique_name); EXCEPTION WHEN dup_val_on_index THEN deb('addBackupSpfile - Inside dup_val_on_index exception'); -- this backup SPFILE record already exists in rcvcat -- must find a record with same backup set... 
SELECT * INTO local FROM bsf WHERE bsf.bs_key = addBackupSpFile.bs_key; -- check if the modification time differs IF (modification_time <> local.modification_time) THEN raise_application_error(-20101, 'Invalid modification_time'); END IF; -- check if the db_unique_name differs IF (db_unique_name <> local.db_unique_name) THEN raise_application_error(-20101, 'Invalid db_unique_name=' || db_unique_name || ' expected db_unique_name=' || local.db_unique_name); END IF; -- fix recid/stamp if they do not match IF local.bsf_recid <> bsf_recid or local.bsf_stamp <> bsf_stamp THEN UPDATE bsf set bsf_recid = addBackupSpFile.bsf_recid, bsf_stamp = addBackupSpFile.bsf_stamp WHERE bsf.bs_key = addBackupSpFile.bs_key; END IF; END addBackupSpFile; PROCEDURE checkBackupSpFile( bsf_recid IN NUMBER ,bsf_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,modification_time IN DATE ,bytes IN NUMBER ,chk_last_recid IN BOOLEAN default TRUE ,db_unique_name IN varchar2 DEFAULT NULL ) IS bs_key NUMBER; site_key NUMBER; BEGIN IF chk_last_recid THEN IF (last_bsf_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (bsf_recid < last_bsf_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (bsf_recid > last_bsf_recid + 1) THEN -- there is a gap in the backup SPFILE records NULL; END IF; last_bsf_recid := bsf_recid; END IF; -- find the key of the backup set BEGIN SELECT bs_key INTO bs_key FROM bs WHERE bs.db_key = this_db_key AND bs.set_stamp = checkBackupSpFile.set_stamp AND bs.set_count = checkBackupSpFile.set_count; EXCEPTION WHEN no_data_found THEN return; END; addBackupSpFile(bs_key, bsf_recid, bsf_stamp, modification_time, bytes, db_unique_name); END checkBackupSpFile; PROCEDURE endBackupSpFileResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_bsf_recid = last_bsf_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bsf_recid := last_bsf_recid; last_bsf_recid := NULL; END endBackupSpFileResync; /*-------------------------* * Backup Redo Log resync * *-------------------------*/ FUNCTION beginBackupRedoLogResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_brl_recid INTO last_brl_recid FROM node WHERE site_key = this_site_key; ELSE last_brl_recid := sessionWaterMarks.high_brl_recid; END IF; RETURN last_brl_recid; END beginBackupRedoLogResync; PROCEDURE checkBackupRedoLog( brl_recid IN NUMBER ,brl_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,thread# IN NUMBER ,sequence# IN NUMBER ,reset_scn IN NUMBER ,reset_time IN DATE ,low_scn IN NUMBER ,low_time IN DATE ,next_scn IN NUMBER ,next_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,chk_last_recid IN BOOLEAN DEFAULT TRUE ,terminal IN VARCHAR2 DEFAULT 'NO' ) IS local brl%rowtype; bskeep number; BEGIN -- ignore BRL entries with a zero timestamp; they are added because of bug -- 5971763 when the user backs up logs and multiple archive log destinations -- are set.
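  -- Sketch (hypothetical record): an entry such as
  --   brl_recid = 57, brl_stamp = 0, thread# = 1, sequence# = 3341
  -- produced under bug 5971763 is skipped by the check below rather than
  -- being inserted into brl with a bogus zero stamp.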
IF brl_stamp = 0 THEN deb('checkBackupRedoLog: ignoring this record as brl_stamp is 0'); RETURN; END IF; -- chk_last_recid is FALSE during brlbp resync (see recover.txt) IF chk_last_recid THEN IF (last_brl_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (brl_recid < last_brl_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (brl_recid > last_brl_recid + 1) THEN -- there is gap in backup set records -- not sure what we should do here NULL; END IF; last_brl_recid := brl_recid; END IF; IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; -- find the key of the backup set BEGIN SELECT bs_key,keep_options INTO local.bs_key, bskeep FROM bs WHERE bs.db_key = this_db_key AND bs.set_stamp = checkBackupRedoLog.set_stamp AND bs.set_count = checkBackupRedoLog.set_count; EXCEPTION WHEN no_data_found THEN RETURN; END; BEGIN -- update only those bs records created in no_data_found exception -- of checkBackupPiece, checkBackupDataFile and checkBackupRedoLog UPDATE bs SET bs.bck_type = 'L' WHERE bs.bs_key = local.bs_key AND bs.bck_type IS NULL; END; -- find the dbinc_key local.dbinc_key := checkIncarnation(reset_scn, reset_time); BEGIN INSERT INTO brl (brl_key, dbinc_key, brl_recid, brl_stamp, thread#, sequence#, low_scn, low_time, next_scn, next_time, blocks, block_size, bs_key, terminal) VALUES (rman_seq.nextval, local.dbinc_key, brl_recid, brl_stamp, thread#, sequence#, low_scn, low_time, next_scn, next_time, blocks, block_size, local.bs_key, terminal); EXCEPTION WHEN dup_val_on_index THEN deb('checkBackupRedoLog - Inside dup_val_on_index exception'); -- the backup redo log record already exists -- must get one record SELECT low_scn, brl_recid, brl_stamp INTO local.low_scn, local.brl_recid, local.brl_stamp FROM brl WHERE brl.bs_key = local.bs_key AND brl.thread# = checkBackupRedoLog.thread# AND brl.sequence# = checkBackupRedoLog.sequence#; -- check the low_scn IF (low_scn <> local.low_scn) THEN raise_application_error(-20098, 'Invalid low scn'); END IF; -- fix recid/stamp if they do not match IF local.brl_recid <> brl_recid or local.brl_stamp <> brl_stamp THEN UPDATE brl set brl_recid = checkBackupRedoLog.brl_recid, brl_stamp = checkBackupRedoLog.brl_stamp WHERE brl.bs_key = local.bs_key AND brl.thread# = checkBackupRedoLog.thread# AND brl.sequence# = checkBackupRedoLog.sequence#; END IF; END; IF (bskeep > 0) THEN updateRestorePoint(low_scn, next_scn); END IF; END checkBackupRedoLog; PROCEDURE endBackupRedoLogResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_brl_recid = last_brl_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_brl_recid := last_brl_recid; last_brl_recid := NULL; END endBackupRedoLogResync; /*----------------------------* * Datafile Copy resync * *----------------------------*/ PROCEDURE deleteDuplicateCCF(recid IN NUMBER, stamp IN NUMBER, fname IN VARCHAR2) IS lfname ccf.fname%TYPE; BEGIN lfname := fname; IF lfname IS NULL THEN BEGIN SELECT fname INTO lfname FROM ccf WHERE ccf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND ccf_recid = recid AND ccf_stamp = stamp; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique_key is dbinc_key, recid and stamp RETURN; END; END IF; -- Delete old copies as the new copy should have overwritten it DELETE ccf WHERE ccf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE 
dbinc.db_key = this_db_key) AND ccf.fname = lfname AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))) AND ccf.fname_hashkey = substr(lfname, 1, 10) || substr(lfname, -10) AND NOT (ccf.ccf_recid = recid AND ccf.ccf_stamp = stamp); END deleteDuplicateCCF; PROCEDURE addControlFileCopy( ccf_recid IN NUMBER ,ccf_stamp IN NUMBER ,fname IN VARCHAR2 ,tag IN VARCHAR2 ,dbinc_key IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,create_time IN DATE ,min_offr_recid IN NUMBER ,block_size IN NUMBER ,completion_time IN DATE ,status IN VARCHAR2 ,controlfile_type IN VARCHAR2 DEFAULT NULL ,keep_options IN NUMBER DEFAULT 0 ,keep_until IN DATE DEFAULT NULL ,is_recovery_dest_file IN VARCHAR2 ,rsr_key IN NUMBER DEFAULT NULL ,blocks IN NUMBER ) IS local ccf%rowtype; BEGIN BEGIN IF (status <> 'D') THEN INSERT INTO ccf(ccf_key, dbinc_key, ccf_recid, ccf_stamp, fname, fname_hashkey, tag, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, completion_time, status, controlfile_type, keep_options, keep_until, is_recovery_dest_file, rsr_key, blocks, site_key) VALUES (rman_seq.nextval, dbinc_key, ccf_recid, ccf_stamp, fname, substr(fname,1,10)||substr(fname,-10), tag, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, completion_time, status, controlfile_type, keep_options, keep_until, is_recovery_dest_file, rsr_key, blocks, this_site_key); deleteDuplicateCCF(ccf_recid, ccf_stamp, fname); END IF; EXCEPTION WHEN dup_val_on_index THEN deb('addControlFileCopy - Inside dup_val_on_index exception'); -- The controlfile copy exists already. SELECT * INTO local FROM ccf WHERE ccf.dbinc_key = addControlFileCopy.dbinc_key AND ccf.ccf_recid = addControlFileCopy.ccf_recid AND ccf.ccf_stamp = addControlFileCopy.ccf_stamp; -- change stamp and resync record, if client is site aware... 
IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check the ckp_scn IF (ckp_scn <> local.ckp_scn) THEN raise_application_error(-20095, 'Invalid ckp_scn'); END IF; -- If site_key is null, update with this_site_key IF local.site_key IS NULL THEN UPDATE ccf SET site_key = this_site_key WHERE ccf.dbinc_key = addControlFileCopy.dbinc_key AND ccf.ccf_recid = addControlFileCopy.ccf_recid AND ccf.ccf_stamp = addControlFileCopy.ccf_stamp; END IF; END; END addControlFileCopy; PROCEDURE deleteDuplicateCDF(recid IN NUMBER, stamp IN NUMBER, fname IN VARCHAR2) IS lfname cdf.fname%TYPE; BEGIN lfname := fname; IF lfname IS NULL THEN BEGIN SELECT fname INTO lfname FROM cdf WHERE cdf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND cdf_recid = recid AND cdf_stamp = stamp; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique_key is dbinc_key, recid and stamp RETURN; END; END IF; -- Delete old copies as the new copy should have overwritten it DELETE cdf WHERE cdf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND cdf.fname = lfname AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))) AND cdf.fname_hashkey = substr(lfname, 1, 10) || substr(lfname, -10) AND NOT (cdf.cdf_recid = recid AND cdf.cdf_stamp = stamp); END deleteDuplicateCDF; PROCEDURE addDataFileCopy( cdf_recid IN NUMBER ,cdf_stamp IN NUMBER ,fname IN VARCHAR2 ,tag IN VARCHAR2 ,file# IN NUMBER ,create_scn IN NUMBER ,dbinc_key IN NUMBER ,incr_level IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,onl_fuzzy IN VARCHAR2 ,bck_fuzzy IN VARCHAR2 ,abs_fuzzy_scn IN NUMBER ,rcv_fuzzy_scn IN NUMBER ,rcv_fuzzy_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,completion_time IN DATE ,status IN VARCHAR2 ,keep_options IN NUMBER ,keep_until IN DATE ,scanned IN VARCHAR2 ,is_recovery_dest_file IN VARCHAR2 ,rsr_key IN NUMBER ,create_time IN DATE ,marked_corrupt IN NUMBER ,foreign_dbid IN number ,plugged_readonly IN varchar2 ,plugin_scn IN number ,plugin_reset_scn IN number ,plugin_reset_time IN date ) IS local cdf%rowtype; BEGIN BEGIN IF (status <> 'D') THEN INSERT INTO cdf(cdf_key, dbinc_key, cdf_recid, cdf_stamp, file#, create_scn, fname, fname_hashkey, tag, incr_level, ckp_scn, ckp_time, onl_fuzzy, bck_fuzzy, abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, completion_time, status, keep_options, keep_until, scanned, is_recovery_dest_file, rsr_key, create_time, marked_corrupt, site_key, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time) VALUES (rman_seq.nextval, dbinc_key, cdf_recid, cdf_stamp, file#, create_scn, fname, substr(fname,1,10)||substr(fname, -10), tag, incr_level, ckp_scn, ckp_time, decode(onl_fuzzy,'YES','Y','NO','N'), decode(bck_fuzzy,'YES','Y','NO','N'), abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, completion_time, status, keep_options, keep_until, decode(scanned,'YES','Y','NO','N'), is_recovery_dest_file, rsr_key, create_time, marked_corrupt, this_site_key, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time); deleteDuplicateCDF(cdf_recid, cdf_stamp, fname); END IF; EXCEPTION WHEN dup_val_on_index THEN deb('addDataFileCopy - Inside dup_val_on_index exception'); SELECT * INTO local FROM cdf WHERE cdf.dbinc_key = addDataFileCopy.dbinc_key AND cdf.cdf_recid = addDataFileCopy.cdf_recid AND cdf.cdf_stamp = addDataFileCopy.cdf_stamp; -- change stamp and 
resync record, if client is site aware... IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check the file# and creation scn IF (file# <> local.file#) THEN raise_application_error(-20096, 'Invalid file'); END IF; IF (create_scn <> local.create_scn AND plugin_scn <> local.plugin_scn) THEN raise_application_error(-20097, 'Invalid create scn'); END IF; -- If site_key is null, update with this_site_key IF local.site_key IS NULL THEN UPDATE cdf SET site_key = this_site_key WHERE cdf.dbinc_key = addDataFileCopy.dbinc_key AND cdf.cdf_recid = addDataFileCopy.cdf_recid AND cdf.cdf_stamp = addDataFileCopy.cdf_stamp; END IF; END; END addDataFileCopy; FUNCTION beginDataFileCopyResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_cdf_recid INTO last_cdf_recid FROM node WHERE site_key = this_site_key; ELSE last_cdf_recid := sessionWaterMarks.high_cdf_recid; END IF; RETURN last_cdf_recid; END beginDataFileCopyResync; PROCEDURE checkDataFileCopy( cdf_recid IN NUMBER ,cdf_stamp IN NUMBER ,fname IN VARCHAR2 ,tag IN VARCHAR2 ,file# IN NUMBER ,create_scn IN NUMBER ,create_time IN DATE ,reset_scn IN NUMBER ,reset_time IN DATE ,incr_level IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,onl_fuzzy IN VARCHAR2 ,bck_fuzzy IN VARCHAR2 ,abs_fuzzy_scn IN NUMBER ,rcv_fuzzy_scn IN NUMBER ,rcv_fuzzy_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,min_offr_recid IN NUMBER ,completion_time IN DATE ,status IN VARCHAR2 ,controlfile_type IN VARCHAR2 DEFAULT NULL ,keep_options IN NUMBER DEFAULT 0 ,keep_until IN DATE DEFAULT NULL ,scanned IN VARCHAR2 DEFAULT 'NO' ,is_recovery_dest_file IN VARCHAR2 DEFAULT 'NO' ,rsr_recid IN number DEFAULT NULL ,rsr_stamp IN number DEFAULT NULL ,marked_corrupt IN number DEFAULT NULL ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ) IS dbinc_key NUMBER; localrsr rsr%rowtype; BEGIN IF (last_cdf_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (cdf_recid < last_cdf_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (cdf_recid > last_cdf_recid + 1) THEN -- there is gap in backup set records -- not sure what we should do here NULL; END IF; last_cdf_recid := cdf_recid; IF (cdf_stamp < kccdivts) THEN deb('checkBackupDatafileCopy - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- find the dbinc_key dbinc_key := checkIncarnation(reset_scn, reset_time); -- Find the owning rsr row and get its key. 
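  -- The rsr lookup below is best-effort (a sketch with hypothetical recid
  -- and stamp values 210/700100): if no rsr row matches
  --   rsr.rsr_recid = 210 AND rsr.rsr_stamp = 700100
  -- for this incarnation and site, the no_data_found is swallowed,
  -- localrsr.rsr_key stays NULL, and the copy is cataloged without an
  -- owning rsr row.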
BEGIN SELECT rsr_key INTO localrsr.rsr_key FROM rsr WHERE rsr.dbinc_key = this_dbinc_key AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is null) AND rsr.rsr_stamp = checkDataFileCopy.rsr_stamp AND rsr.rsr_recid = checkDataFileCopy.rsr_recid; EXCEPTION WHEN no_data_found THEN -- no rsr record avaiable - ignore NULL; END; IF (file# = 0) THEN addControlFileCopy(cdf_recid, cdf_stamp, fname, tag, dbinc_key, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, completion_time, status, controlfile_type, keep_options, keep_until, is_recovery_dest_file, localrsr.rsr_key, blocks); ELSE addDataFileCopy(cdf_recid, cdf_stamp, fname, tag, file#, create_scn, dbinc_key, incr_level, ckp_scn, ckp_time, onl_fuzzy, bck_fuzzy, abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, completion_time, status, keep_options, keep_until, scanned, is_recovery_dest_file, localrsr.rsr_key, create_time, marked_corrupt, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time); END IF; END checkDataFileCopy; PROCEDURE endDataFileCopyResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_cdf_recid = last_cdf_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_cdf_recid := last_cdf_recid; last_cdf_recid := NULL; END endDataFileCopyResync; /*----------------------------* * Proxy Datafile resync * *----------------------------*/ PROCEDURE deleteDuplicateXCF(recid IN NUMBER, stamp IN NUMBER, device_type IN VARCHAR2, handle IN VARCHAR2) IS lhandle xcf.handle%TYPE; ldevice_type xcf.device_type%TYPE; BEGIN lhandle := handle; IF lhandle IS NULL OR ldevice_type IS NULL THEN BEGIN SELECT handle, device_type INTO lhandle, ldevice_type FROM xcf WHERE xcf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xcf_recid = recid AND xcf_stamp = stamp; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique_key is dbinc_key, recid and stamp RETURN; END; END IF; -- Delete old copies as the new copy should have overwritten it DELETE xcf WHERE xcf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xcf.device_type = ldevice_type AND xcf.handle = lhandle AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))) AND xcf.handle_hashkey = substr(ldevice_type, 1, 10) || substr(lhandle, 1, 10) || substr(lhandle, -10) AND NOT (xcf.xcf_recid = recid AND xcf.xcf_stamp = stamp); END deleteDuplicateXCF; PROCEDURE addProxyControlFile( dbinc_key IN NUMBER ,xcf_recid IN NUMBER ,xcf_stamp IN NUMBER ,tag IN VARCHAR2 ,ckp_scn IN NUMBER ,ckp_time IN DATE ,create_time IN DATE ,min_offr_recid IN NUMBER ,block_size IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN VARCHAR2 ,completion_time IN DATE ,status IN VARCHAR2 ,controlfile_type IN VARCHAR2 ,keep_options IN NUMBER ,keep_until IN DATE ,rsr_key IN NUMBER ,blocks IN NUMBER ) IS local xcf%rowtype; BEGIN BEGIN IF (status <> 'D') THEN INSERT INTO xcf(xcf_key, dbinc_key, xcf_recid, xcf_stamp, tag, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, device_type, handle, handle_hashkey, comments, media, media_pool, start_time, completion_time, status, controlfile_type, keep_options, keep_until, rsr_key, site_key) VALUES (rman_seq.nextval, dbinc_key, xcf_recid, xcf_stamp, tag, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, device_type, handle, 
substr(device_type,1,10)||substr(handle,1,10)||substr(handle,-10), comments, media, media_pool, start_time, completion_time, status, controlfile_type, keep_options, keep_until, rsr_key, this_site_key); deleteDuplicateXCF(xcf_recid, xcf_stamp, device_type, handle); END IF; EXCEPTION WHEN dup_val_on_index THEN deb('addProxyControlFile - Inside dup_val_on_index exception'); -- this proxy controlfile backup already exists in the recovery catalog SELECT * INTO local FROM xcf WHERE xcf.dbinc_key = addProxyControlFile.dbinc_key AND xcf.xcf_recid = addProxyControlFile.xcf_recid AND xcf.xcf_stamp = addProxyControlFile.xcf_stamp; -- change stamp and resync record, if client is site aware... IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check the ckp_scn IF (ckp_scn <> local.ckp_scn) THEN raise_application_error(-20095, 'Invalid ckp_scn'); END IF; -- If site_key is null, update with this_site_key IF local.site_key IS NULL THEN UPDATE xcf SET site_key = this_site_key WHERE xcf.dbinc_key = addProxyControlFile.dbinc_key AND xcf.xcf_recid = addProxyControlFile.xcf_recid AND xcf.xcf_stamp = addProxyControlFile.xcf_stamp; END IF; END; END addProxyControlFile; PROCEDURE deleteDuplicateXDF(recid IN NUMBER, stamp IN NUMBER, device_type IN VARCHAR2, handle IN VARCHAR2) IS lhandle xdf.handle%TYPE; ldevice_type xdf.device_type%TYPE; BEGIN lhandle := handle; IF lhandle IS NULL OR ldevice_type IS NULL THEN BEGIN SELECT handle, device_type INTO lhandle, ldevice_type FROM xdf WHERE xdf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xdf_recid = recid AND xdf_stamp = stamp; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique_key is dbinc_key, recid and stamp RETURN; END; END IF; -- Delete old copies as the new copy should have overwritten it DELETE xdf WHERE xdf.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xdf.device_type = ldevice_type AND xdf.handle = lhandle AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))) AND xdf.handle_hashkey = substr(ldevice_type, 1, 10) || substr(lhandle, 1, 10) || substr(lhandle, -10) AND NOT (xdf.xdf_recid = recid AND xdf.xdf_stamp = stamp); END deleteDuplicateXDF; PROCEDURE addProxyDataFile( dbinc_key IN NUMBER ,xdf_recid IN NUMBER ,xdf_stamp IN NUMBER ,tag IN VARCHAR2 ,file# IN NUMBER ,create_scn IN NUMBER ,incr_level IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,onl_fuzzy IN VARCHAR2 ,bck_fuzzy IN VARCHAR2 ,abs_fuzzy_scn IN NUMBER ,rcv_fuzzy_scn IN NUMBER ,rcv_fuzzy_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN VARCHAR2 ,completion_time IN DATE ,status IN VARCHAR2 ,keep_options IN NUMBER DEFAULT 0 ,keep_until IN DATE DEFAULT NULL ,rsr_key IN NUMBER ,create_time IN DATE ,foreign_dbid IN number ,plugged_readonly IN varchar2 ,plugin_scn IN number ,plugin_reset_scn IN number ,plugin_reset_time IN date ) IS local xdf%rowtype; BEGIN BEGIN IF (status <> 'D') THEN INSERT INTO xdf(xdf_key, dbinc_key, xdf_recid, xdf_stamp, file#, create_scn, tag, incr_level, ckp_scn, ckp_time, onl_fuzzy, bck_fuzzy, abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, device_type, handle, handle_hashkey, comments, media, media_pool, start_time, completion_time, status, keep_options, keep_until, rsr_key, site_key, foreign_dbid, plugged_readonly, 
plugin_scn, plugin_reset_scn, plugin_reset_time) VALUES (rman_seq.nextval, dbinc_key, xdf_recid, xdf_stamp, file#, create_scn, tag, incr_level, ckp_scn, ckp_time, decode(onl_fuzzy,'YES','Y','NO','N'), decode(bck_fuzzy,'YES','Y','NO','N'), abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, device_type, handle, substr(device_type,1,10)||substr(handle,1,10)||substr(handle,-10), comments, media, media_pool, start_time, completion_time, status, keep_options, keep_until, rsr_key, this_site_key, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time);
deleteDuplicateXDF(xdf_recid, xdf_stamp, device_type, handle); END IF;
EXCEPTION WHEN dup_val_on_index THEN deb('addProxyDatafile - Inside dup_val_on_index exception');
SELECT * INTO local FROM xdf WHERE xdf.dbinc_key = addProxyDataFile.dbinc_key AND xdf.xdf_recid = addProxyDataFile.xdf_recid AND xdf.xdf_stamp = addProxyDataFile.xdf_stamp;
-- change stamp and resync record, if client is site aware...
IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF;
-- check the file# and creation scn
IF (file# <> local.file#) THEN raise_application_error(-20096, 'Invalid file'); END IF;
IF (create_scn <> local.create_scn AND plugin_scn <> local.plugin_scn) THEN raise_application_error(-20097, 'Invalid create scn'); END IF;
-- If site_key is null, update with this_site_key
IF local.site_key IS NULL THEN UPDATE xdf SET site_key = this_site_key WHERE xdf.dbinc_key = addProxyDataFile.dbinc_key AND xdf.xdf_recid = addProxyDataFile.xdf_recid AND xdf.xdf_stamp = addProxyDataFile.xdf_stamp; END IF;
END; END addProxyDataFile;
PROCEDURE deleteDuplicateXAL(recid IN NUMBER, stamp IN NUMBER, device_type IN VARCHAR2, handle IN VARCHAR2) IS lhandle xal.handle%TYPE; ldevice_type xal.device_type%TYPE;
BEGIN lhandle := handle; ldevice_type := device_type;
IF lhandle IS NULL OR ldevice_type IS NULL THEN BEGIN SELECT handle, device_type INTO lhandle, ldevice_type FROM xal WHERE xal.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xal_recid = recid AND xal_stamp = stamp; EXCEPTION WHEN no_data_found THEN RETURN; WHEN too_many_rows THEN -- unique_key is dbinc_key, recid and stamp
RETURN; END; END IF;
-- Delete old copies as the new copy should have overwritten them
DELETE xal WHERE xal.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND xal.device_type = ldevice_type AND xal.handle = lhandle AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key))) AND xal.handle_hashkey = substr(ldevice_type, 1, 10) || substr(lhandle, 1, 10) || substr(lhandle, -10) AND NOT (xal.xal_recid = recid AND xal.xal_stamp = stamp);
END deleteDuplicateXAL;
PROCEDURE addProxyArchivedLog( dbinc_key IN NUMBER ,xal_recid IN NUMBER ,xal_stamp IN NUMBER ,tag IN VARCHAR2 ,thread# IN NUMBER ,sequence# IN NUMBER ,resetlogs_change# IN NUMBER ,resetlogs_time IN DATE ,first_change# IN NUMBER ,first_time IN DATE ,next_change# IN NUMBER ,next_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN VARCHAR2 ,completion_time IN DATE ,status IN VARCHAR2 ,rsr_key IN NUMBER ,terminal IN VARCHAR2 default 'NO' ,keep_until IN DATE default NULL ,keep_options IN NUMBER default 0 ) IS local xal%rowtype;
BEGIN BEGIN IF (status <> 'D') THEN INSERT INTO xal(xal_key, dbinc_key, xal_recid, xal_stamp, tag, thread#, sequence#, low_scn, low_time,
next_scn, next_time, blocks, block_size, device_type, handle, handle_hashkey, comments, media, media_pool, start_time, completion_time, status, rsr_key, terminal, keep_until, keep_options, site_key) VALUES (rman_seq.nextval, dbinc_key, xal_recid, xal_stamp, tag, thread#, sequence#, first_change#, first_time, next_change#, next_time, blocks, block_size, device_type, handle, substr(device_type,1,10)||substr(handle,1,10)||substr(handle,-10), comments, media, media_pool, start_time, completion_time, status, rsr_key, terminal, keep_until, keep_options, this_site_key); deleteDuplicateXAL(xal_recid, xal_stamp, device_type, handle); END IF; EXCEPTION WHEN dup_val_on_index THEN deb('addProxyArchivedLog - Inside dup_val_on_index exception'); SELECT * INTO local FROM xal WHERE xal.dbinc_key = addProxyArchivedLog.dbinc_key AND xal.xal_recid = addProxyArchivedLog.xal_recid AND xal.xal_stamp = addProxyArchivedLog.xal_stamp; -- change stamp and resync record, if client is site aware... IF client_site_aware AND this_site_key <> local.site_key THEN raise_application_error(-20081, 'change stamp for the record'); END IF; -- check the low_scn IF (first_change# <> local.low_scn) THEN raise_application_error(-20098, 'Invalid low scn'); END IF; -- If site_key is null, update with this_site_key IF local.site_key IS NULL THEN UPDATE xal SET site_key = this_site_key WHERE xal.dbinc_key = addProxyArchivedLog.dbinc_key AND xal.xal_recid = addProxyArchivedLog.xal_recid AND xal.xal_stamp = addProxyArchivedLog.xal_stamp; END IF; END; IF (keep_options > 0) THEN updateRestorePoint(first_change#, next_change#); END IF; END addProxyArchivedLog; -- Note that this function will be used to start the resync of both proxy -- datafiles and archived logs, because they both share the same recids. FUNCTION beginProxyResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_pc_recid INTO last_xdf_recid FROM node WHERE site_key = this_site_key; ELSE last_xdf_recid := sessionWaterMarks.high_pc_recid; END IF; last_xal_recid := last_xdf_recid; RETURN last_xdf_recid; END beginProxyResync; PROCEDURE checkProxyDataFile( xdf_recid IN NUMBER ,xdf_stamp IN NUMBER ,tag IN VARCHAR2 ,file# IN NUMBER ,create_scn IN NUMBER ,create_time IN DATE ,reset_scn IN NUMBER ,reset_time IN DATE ,incr_level IN NUMBER ,ckp_scn IN NUMBER ,ckp_time IN DATE ,onl_fuzzy IN VARCHAR2 ,bck_fuzzy IN VARCHAR2 ,abs_fuzzy_scn IN NUMBER ,rcv_fuzzy_scn IN NUMBER ,rcv_fuzzy_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,min_offr_recid IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN DATE ,completion_time IN DATE ,status IN VARCHAR2 ,controlfile_type IN VARCHAR2 DEFAULT NULL ,keep_options IN NUMBER DEFAULT 0 ,keep_until IN DATE DEFAULT NULL ,rsr_recid IN NUMBER DEFAULT NULL ,rsr_stamp IN NUMBER DEFAULT NULL ,foreign_dbid IN number DEFAULT 0 ,plugged_readonly IN varchar2 DEFAULT 'NO' ,plugin_scn IN number DEFAULT 0 ,plugin_reset_scn IN number DEFAULT 0 ,plugin_reset_time IN date DEFAULT NULL ) IS dbinc_key NUMBER; localrsr rsr%rowtype; BEGIN IF (last_xdf_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (xdf_recid < last_xdf_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; -- It is OK to have a gap in the recid for proxy records, because -- v$proxy_datafile and v$proxy_archivedlog share the same recid sequence. 
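-- (Illustrative note on the resync contract, inferred from the checks in
-- this section: beginProxyResync returns the high water mark recid
-- recorded for this site, the checkProxy* calls must then arrive in
-- non-decreasing recid order (an out-of-order recid raises -20036; a
-- missing begin call raises -20037), and endProxyResync persists the new
-- high water mark. A hypothetical client-side driver would look roughly
-- like:
--   high := beginProxyResync;
--   -- for each v$proxy_datafile / v$proxy_archivedlog row with
--   -- recid > high, in ascending recid order:
--   --   checkProxyDataFile(...) or checkProxyArchivedLog(...)
--   endProxyResync;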
last_xdf_recid := xdf_recid; IF (xdf_stamp < kccdivts) THEN deb('checkProxyDatafile - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- Find the dbinc_key that this proxy backup belongs to. It is not -- necessarily this_dbinc_key. dbinc_key := checkIncarnation(reset_scn, reset_time); -- Find the owning rsr row and get its key. BEGIN SELECT rsr_key INTO localrsr.rsr_key FROM rsr WHERE rsr.dbinc_key = this_dbinc_key AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is null) AND rsr.rsr_stamp = checkProxyDataFile.rsr_stamp AND rsr.rsr_recid = checkProxyDataFile.rsr_recid; EXCEPTION WHEN no_data_found THEN -- No rsr record available - ignore. NULL; END; IF (file# = 0) THEN addProxyControlFile(dbinc_key, xdf_recid, xdf_stamp, tag, ckp_scn, ckp_time, create_time, min_offr_recid, block_size, device_type, handle, comments, media, media_pool, start_time, completion_time, status, controlfile_type, keep_options, keep_until, localrsr.rsr_key, blocks); ELSE addProxyDataFile(dbinc_key, xdf_recid, xdf_stamp, tag, file#, create_scn, incr_level, ckp_scn, ckp_time, onl_fuzzy, bck_fuzzy, abs_fuzzy_scn, rcv_fuzzy_scn, rcv_fuzzy_time, blocks, block_size, device_type, handle, comments, media, media_pool, start_time, completion_time, status, keep_options, keep_until, localrsr.rsr_key, create_time, foreign_dbid, plugged_readonly, plugin_scn, plugin_reset_scn, plugin_reset_time); END IF; END checkProxyDataFile; PROCEDURE checkProxyArchivedLog( xal_recid IN NUMBER ,xal_stamp IN NUMBER ,tag IN VARCHAR2 ,thread# IN NUMBER ,sequence# IN NUMBER ,resetlogs_change# IN NUMBER ,resetlogs_time IN DATE ,first_change# IN NUMBER ,first_time IN DATE ,next_change# IN NUMBER ,next_time IN DATE ,blocks IN NUMBER ,block_size IN NUMBER ,device_type IN VARCHAR2 ,handle IN VARCHAR2 ,comments IN VARCHAR2 ,media IN VARCHAR2 ,media_pool IN NUMBER ,start_time IN DATE ,completion_time IN DATE ,status IN VARCHAR2 ,rsr_recid IN NUMBER ,rsr_stamp IN NUMBER ,terminal IN VARCHAR2 default 'NO' ,keep_until IN DATE default NULL ,keep_options IN NUMBER default 0 ) IS dbinc_key NUMBER; localrsr rsr%rowtype; BEGIN IF (last_xal_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (xal_recid < last_xal_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; -- It is OK to have a gap in the recid for proxy records, because -- v$proxy_datafile and v$proxy_archivedlog share the same recid sequence. last_xal_recid := xal_recid; IF (xal_stamp < kccdivts) THEN deb('checkProxyArchivedLog - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- find the dbinc_key that this proxy backup belongs to. It is not -- necessarily this_dbinc_key. dbinc_key := checkIncarnation(resetlogs_change#, resetlogs_time); -- Find the owning rsr row and get its key. BEGIN SELECT rsr_key INTO localrsr.rsr_key FROM rsr WHERE rsr.dbinc_key = this_dbinc_key AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is null) AND rsr.rsr_stamp = checkProxyArchivedLog.rsr_stamp AND rsr.rsr_recid = checkProxyArchivedLog.rsr_recid; EXCEPTION WHEN no_data_found THEN -- No rsr record available - ignore. 
NULL; END;
addProxyArchivedLog(dbinc_key, xal_recid, xal_stamp, tag, thread#, sequence#, resetlogs_change#, resetlogs_time, first_change#, first_time, next_change#, next_time, blocks, block_size, device_type, handle, comments, media, media_pool, start_time, completion_time, status, localrsr.rsr_key, terminal, keep_until, keep_options);
END checkProxyArchivedLog;
PROCEDURE endProxyResync IS last_pc_recid number := greatest(nvl(last_xdf_recid,0), nvl(last_xal_recid,0));
BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_pc_recid = last_pc_recid WHERE site_key = this_site_key; END IF;
sessionWaterMarks.high_pc_recid := last_pc_recid; last_xdf_recid := NULL; last_xal_recid := NULL; END endProxyResync;
/*----------------------------* * Corrupt Block resync * *----------------------------*/
FUNCTION beginBackupCorruptionResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_bcb_recid INTO last_bcb_recid FROM node WHERE site_key = this_site_key; ELSE last_bcb_recid := sessionWaterMarks.high_bcb_recid; END IF; RETURN last_bcb_recid; END beginBackupCorruptionResync;
PROCEDURE checkBackupCorruption( bcb_recid IN NUMBER ,bcb_stamp IN NUMBER ,set_stamp IN NUMBER ,set_count IN NUMBER ,piece# IN NUMBER ,file# IN NUMBER ,block# IN NUMBER ,blocks IN NUMBER ,corrupt_scn IN NUMBER ,marked_corrupt IN VARCHAR2 ,corruption_type IN VARCHAR2 ) IS local bcb%rowtype;
BEGIN IF (last_bcb_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF;
IF (bcb_recid < last_bcb_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF;
IF (bcb_recid > last_bcb_recid + 1) THEN -- there is a gap in the backup corruption records
-- not sure what we should do here
NULL; END IF;
last_bcb_recid := bcb_recid;
IF (bcb_stamp < kccdivts) THEN deb('checkBackupCorruption - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile
END IF;
-- find the bdf_key to which this corrupt block belongs
BEGIN SELECT bdf_key INTO local.bdf_key FROM bdf, bs WHERE bdf.bs_key = bs.bs_key AND bs.db_key = this_db_key AND bs.set_stamp = checkBackupCorruption.set_stamp AND bs.set_count = checkBackupCorruption.set_count AND bdf.file# = checkBackupCorruption.file#; EXCEPTION WHEN no_data_found THEN -- if bdf_key is not found, ignore this corrupt block
RETURN; END;
BEGIN INSERT INTO bcb (bdf_key, bcb_recid, bcb_stamp, piece#, block#, blocks, corrupt_scn, marked_corrupt, corruption_type) VALUES (local.bdf_key, bcb_recid, bcb_stamp, piece#, block#, blocks, corrupt_scn, decode(marked_corrupt,'YES','Y','NO','N'), corruption_type); EXCEPTION WHEN dup_val_on_index THEN -- the corrupt block is already in rcvcat, so do nothing
RETURN; END;
END checkBackupCorruption;
PROCEDURE endBackupCorruptionResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_bcb_recid = last_bcb_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bcb_recid := last_bcb_recid; last_bcb_recid := NULL; END endBackupCorruptionResync;
FUNCTION beginCopyCorruptionResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_ccb_recid INTO last_ccb_recid FROM node WHERE site_key = this_site_key; ELSE last_ccb_recid := sessionWaterMarks.high_ccb_recid; END IF; RETURN
last_ccb_recid; END beginCopyCorruptionResync;
PROCEDURE checkCopyCorruption( ccb_recid IN NUMBER ,ccb_stamp IN NUMBER ,cdf_recid IN NUMBER ,cdf_stamp IN NUMBER ,file# IN NUMBER ,block# IN NUMBER ,blocks IN NUMBER ,corrupt_scn IN NUMBER ,marked_corrupt IN VARCHAR2 ,corruption_type IN VARCHAR2 ) IS local ccb%rowtype;
BEGIN IF (last_ccb_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF;
IF (ccb_recid < last_ccb_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF;
IF (ccb_recid > last_ccb_recid + 1) THEN -- there is a gap in the copy corruption records
-- not sure what we should do here
NULL; END IF;
last_ccb_recid := ccb_recid;
IF (ccb_stamp < kccdivts) THEN deb('checkCopyCorruption - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile
END IF;
-- find the cdf_key to which this corrupt block belongs
BEGIN SELECT cdf_key INTO local.cdf_key FROM cdf WHERE cdf.dbinc_key = this_dbinc_key AND cdf.cdf_recid = checkCopyCorruption.cdf_recid AND cdf.cdf_stamp = checkCopyCorruption.cdf_stamp AND cdf.file# = checkCopyCorruption.file#; EXCEPTION WHEN no_data_found THEN -- if cdf_key is not found, ignore this corrupt block
RETURN; END;
BEGIN INSERT INTO ccb (cdf_key, ccb_recid, ccb_stamp, block#, blocks, corrupt_scn, marked_corrupt, corruption_type) VALUES (local.cdf_key, ccb_recid, ccb_stamp, block#, blocks, corrupt_scn, decode(marked_corrupt,'YES','Y','NO','N'), corruption_type); EXCEPTION WHEN dup_val_on_index THEN -- the corrupt block is already in rcvcat, so do nothing
RETURN; END;
END checkCopyCorruption;
PROCEDURE endCopyCorruptionResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_ccb_recid = last_ccb_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_ccb_recid := last_ccb_recid; last_ccb_recid := NULL; END endCopyCorruptionResync;
FUNCTION beginBlockCorruptionResync( low_bcr_recid IN number) RETURN NUMBER IS old_bcr_recid number;
BEGIN checkResync;
SELECT high_bcr_recid, low_bcr_recid INTO last_bcr_recid, old_bcr_recid FROM node WHERE site_key = this_site_key;
-- If the oldest recid doesn't match, then purge all the catalog entries.
-- This is in order to avoid duplicate block ranges in the bcr table for
-- the records that are reused.
--
-- NOTE!!!
-- This has to be investigated later to check if it is possible to
-- keep the block corruption ranges that are not known to the controlfile
--
IF (old_bcr_recid != low_bcr_recid) THEN DELETE bcr WHERE site_key = this_site_key AND bcr_recid < low_bcr_recid; UPDATE node SET low_bcr_recid = low_bcr_recid WHERE site_key = this_site_key; END IF;
RETURN last_bcr_recid; END beginBlockCorruptionResync;
PROCEDURE checkBlockCorruption( bcr_recid IN NUMBER ,bcr_stamp IN NUMBER ,file# IN NUMBER ,create_scn IN NUMBER ,create_time IN DATE ,block# IN NUMBER ,blocks IN NUMBER ,corrupt_scn IN NUMBER ,corruption_type IN VARCHAR2 ) IS local df%rowtype;
BEGIN IF (last_bcr_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF;
IF (bcr_recid < last_bcr_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF;
IF (bcr_recid > last_bcr_recid + 1) THEN -- there is a gap in the block corruption records
-- not sure what we should do here
NULL; END IF;
last_bcr_recid := bcr_recid;
-- find the df_key to which this corrupt block belongs
BEGIN SELECT distinct df.df_key INTO local.df_key FROM df, site_dfatt WHERE df.df_key = site_dfatt.df_key AND site_dfatt.site_key = this_site_key AND df.file# = checkBlockCorruption.file# AND df.create_scn = checkBlockCorruption.create_scn AND df.create_time = checkBlockCorruption.create_time; EXCEPTION WHEN no_data_found THEN -- if df_key is not found, ignore this corrupt block
deb('checkBlockCorruption - no df_key found'); RETURN; END;
deb('checkBlockCorruption - df_key=' || local.df_key);
BEGIN INSERT INTO bcr (bcr_recid, bcr_stamp, df_key, site_key, block#, blocks, corrupt_scn, corruption_type) VALUES (bcr_recid, bcr_stamp, local.df_key, this_site_key, block#, blocks, corrupt_scn, corruption_type); EXCEPTION WHEN dup_val_on_index THEN -- the corrupt block is already in rcvcat, so do nothing
RETURN; END;
END checkBlockCorruption;
PROCEDURE endBlockCorruptionResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_bcr_recid = last_bcr_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_bcr_recid := last_bcr_recid; last_bcr_recid := NULL; END endBlockCorruptionResync;
/*----------------------------* * Deleted Object resync * *----------------------------*/
FUNCTION beginDeletedObjectResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_do_recid INTO last_do_recid FROM node WHERE site_key = this_site_key; ELSE last_do_recid := sessionWaterMarks.high_do_recid; END IF; RETURN last_do_recid; END beginDeletedObjectResync;
PROCEDURE checkDeletedObject( do_recid IN NUMBER ,do_stamp IN NUMBER ,object_type IN VARCHAR2 ,object_recid IN NUMBER ,object_stamp IN NUMBER ,object_data IN NUMBER DEFAULT NULL ,object_fname IN VARCHAR2 DEFAULT NULL ,object_create_scn IN NUMBER DEFAULT NULL ,set_stamp IN NUMBER DEFAULT NULL ,set_count IN NUMBER DEFAULT NULL) IS local bp%rowtype; new_status VARCHAR2(1); rc boolean; keep_options number := NULL; keep_until date := NULL;
BEGIN IF (last_do_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF;
IF (do_recid < last_do_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF;
IF (do_recid > last_do_recid + 1) THEN -- there is a gap in the deleted object records
-- not sure what we should do here
NULL; END IF;
last_do_recid := do_recid;
IF (do_stamp < kccdivts) THEN deb('checkDeletedObject - 
ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- change/delete the objects. If the update fails to update a row, -- we do not care; it just means that the object has been deleted -- from or never inserted into rcvcat. IF (object_type like 'BACKUP SET%') THEN IF (object_type = 'BACKUP SET KEEP UNTIL') THEN keep_until := stamp2date(object_data); ELSIF (object_type = 'BACKUP SET KEEP OPTIONS') THEN keep_options := object_data; ELSE raise_application_error(-20999, 'Internal error in checkDeletedObject(): bad object_type '|| object_type); END IF; changeBackupSet(object_recid, object_stamp, keep_options, keep_until); END IF; IF (object_type like 'BACKUP PIECE%') THEN IF (object_type = 'BACKUP PIECE') THEN new_status := 'D'; ELSIF (object_type = 'BACKUP PIECE AVAILABLE') THEN new_status := 'A'; ELSIF (object_type = 'BACKUP PIECE EXPIRED') THEN new_status := 'X'; ELSIF (object_type = 'BACKUP PIECE UNAVAILABLE') THEN new_status := 'U'; ELSE raise_application_error(-20999, 'Internal error in checkDeletedObject(): bad object_type '|| object_type); END IF; changeBackupPiece(object_recid, object_stamp, new_status, set_stamp, set_count); END IF; IF (object_type like 'DATAFILE COPY%') THEN IF (object_type = 'DATAFILE COPY') THEN new_status := 'D'; ELSIF (object_type = 'DATAFILE COPY AVAILABLE') THEN new_status := 'A'; ELSIF (object_type = 'DATAFILE COPY EXPIRED') THEN new_status := 'X'; ELSIF (object_type = 'DATAFILE COPY UNAVAILABLE') THEN new_status := 'U'; ELSIF (object_type = 'DATAFILE COPY KEEP UNTIL') THEN new_status := NULL; keep_until := stamp2date(object_data); ELSIF (object_type = 'DATAFILE COPY KEEP OPTIONS') THEN new_status := NULL; keep_options := object_data; ELSE raise_application_error(-20999, 'Internal error in checkDeletedObject(): bad object_type '|| object_type); END IF; changeDatafileCopy(object_recid, object_stamp, new_status, keep_options, keep_until); END IF; IF (object_type like 'ARCHIVED LOG%') THEN IF (object_type = 'ARCHIVED LOG') THEN new_status := 'D'; ELSIF (object_type = 'ARCHIVED LOG AVAILABLE') THEN new_status := 'A'; ELSIF (object_type = 'ARCHIVED LOG EXPIRED') THEN new_status := 'X'; ELSIF (object_type = 'ARCHIVED LOG UNAVAILABLE') THEN new_status := 'U'; ELSE raise_application_error(-20999, 'Internal error in checkDeletedObject(): bad object_type '|| object_type); END IF; changeArchivedLog(object_recid, object_stamp, new_status); END IF; IF (object_type like 'PROXY COPY%') THEN IF (object_type = 'PROXY COPY') THEN new_status := 'D'; ELSIF (object_type = 'PROXY COPY AVAILABLE') THEN new_status := 'A'; ELSIF (object_type = 'PROXY COPY EXPIRED') THEN new_status := 'X'; ELSIF (object_type = 'PROXY COPY UNAVAILABLE') THEN new_status := 'U'; ELSIF (object_type = 'PROXY COPY KEEP UNTIL') THEN new_status := NULL; keep_until := stamp2date(object_data); ELSIF (object_type = 'PROXY COPY KEEP OPTIONS') THEN new_status := NULL; keep_options := object_data; ELSE raise_application_error(-20999, 'Internal error in checkDeletedObject(): bad object_type '|| object_type); END IF; changeProxyCopy(object_recid, object_stamp, new_status, keep_options, keep_until); END IF; IF (object_type = 'DATAFILE RENAME ON RESTORE') THEN deb('checkDeletedObject - renaming file#='||object_data||' to '|| object_fname); -- We should rename the datafile. We will do that by updating -- the current dfatt record. Note that this is ignored during -- full resync - in that case the filename is already resynced. 
-- In case we could not find the df_key, ignore the file rename as
-- the deleted object belongs to a dropped file.
DECLARE local_df_key NUMBER;
BEGIN SELECT df_key INTO local_df_key FROM df WHERE dbinc_key = this_dbinc_key AND df.file# = object_data AND df.create_scn = object_create_scn;
UPDATE site_dfatt SET fname = object_fname WHERE site_key = this_site_key AND df_key = local_df_key;
IF (NOT SQL%FOUND) THEN deb('checkDeletedObject - doing an insert'); INSERT INTO site_dfatt (fname, df_key, site_key) VALUES (object_fname, local_df_key, this_site_key); END IF;
EXCEPTION WHEN no_data_found THEN NULL; END;
END IF;
IF (object_type = 'PLUGGED READONLY RENAME') THEN deb('In checkDeletedObject, renaming plugged readonly file#='|| object_data||' to ' ||object_fname);
-- This is similar to renaming a datafile, but for a plugged readonly
-- file the plugin SCN must be compared as well.
-- bug 8947742:
-- Note that a plugin file with the same file# can have two different
-- actual creation_change#, one before the file was plugged in (read only
-- state), and the other when it was made read-write. Hence, both the
-- entries for the file name should be updated in the site_dfatt table.
DECLARE CURSOR df_key_plugin_cur(file# IN NUMBER, plugin_scn IN NUMBER) IS SELECT df_key FROM df WHERE dbinc_key = this_dbinc_key AND df.file# = df_key_plugin_cur.file# AND df.plugin_scn = df_key_plugin_cur.plugin_scn;
BEGIN FOR plugin_df_key IN df_key_plugin_cur(object_data, object_create_scn) LOOP
UPDATE site_dfatt SET fname = object_fname WHERE site_key = this_site_key AND df_key = plugin_df_key.df_key;
IF (NOT SQL%FOUND) THEN deb('checkDeletedObject - doing an insert'); INSERT INTO site_dfatt (fname, df_key, site_key) VALUES (object_fname, plugin_df_key.df_key, this_site_key); END IF;
END LOOP; END;
END IF;
IF (object_type = 'TEMPFILE RENAME') THEN deb('checkDeletedObject - renaming temp file#='||object_data||' to '|| object_fname);
-- We should rename the tempfile. We will do that by updating
-- the current site_tfatt record. Note that this is ignored during
-- full resync - in that case the filename is already resynced.
-- In case we could not find the tf_key, ignore the file rename as
-- the deleted object belongs to a dropped file.
DECLARE local_tf_key NUMBER;
BEGIN SELECT tf_key INTO local_tf_key FROM tf WHERE dbinc_key = this_dbinc_key AND tf.file# = object_data AND tf.create_scn = object_create_scn;
UPDATE site_tfatt SET fname = object_fname WHERE site_key = this_site_key AND tf_key = local_tf_key;
IF (NOT SQL%FOUND) THEN INSERT INTO site_tfatt (fname, tf_key, site_key) VALUES (object_fname, local_tf_key, this_site_key); END IF;
EXCEPTION WHEN no_data_found THEN NULL; END;
END IF;
IF (object_type = 'DATABASE BLOCK CORRUPTION') THEN DELETE bcr WHERE site_key = this_site_key AND bcr_recid = object_recid AND bcr_stamp = object_stamp; END IF;
IF (object_type = 'RESTORE POINT') THEN DELETE nrsp WHERE site_key = this_site_key AND nrsp_recid = object_recid AND nrsp_stamp = object_stamp; END IF;
END checkDeletedObject;
PROCEDURE endDeletedObjectResync IS BEGIN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_do_recid = last_do_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_do_recid := last_do_recid; last_do_recid := NULL; END endDeletedObjectResync;
/*----------------------------* * RMAN Output resync * *----------------------------*/
FUNCTION beginRmanOutputResync(start_timestamp IN NUMBER) RETURN NUMBER IS
BEGIN deb('beginRmanOutputResync - input instance start time='||start_timestamp); checkResync;
-- We must have a node entry by now... otherwise let it signal an error
SELECT inst_startup_stamp, high_rout_stamp into last_inst_startup_stamp, last_rout_stamp from node where node.db_key = this_db_key and ((this_db_unique_name is null and node.db_unique_name is null) or node.db_unique_name = this_db_unique_name);
deb('beginRmanOutputResync - last_inst_startup_stamp='|| last_inst_startup_stamp||',last_rout_stamp='||last_rout_stamp);
-- If the instance has been restarted for this node, get all rows from
-- v$rman_output; otherwise get from the last stamp.
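-- (Background for the check below: v$rman_output is an in-memory view,
-- so its rows and record stamps do not survive an instance restart. When
-- the stored startup stamp differs from the one passed in, the per-node
-- high_rout_stamp can no longer be trusted and we fall back to the
-- session-level water mark; the new startup stamp is then persisted by
-- endRmanOutputResync.)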
IF (last_inst_startup_stamp <> start_timestamp) THEN last_rout_stamp := sessionWaterMarks.high_rout_stamp; last_inst_startup_stamp := start_timestamp; END IF;
RETURN last_rout_stamp; END beginRmanOutputResync;
-- bug 10143694
PROCEDURE insertCachedROUT IS errors NUMBER; dml_errors EXCEPTION; PRAGMA EXCEPTION_INIT(dml_errors, -24381);
BEGIN IF lrout_curridx = 0 THEN RETURN; END IF;
deb('doing bulk update of ' || lrout_curridx || ' rows into ROUT');
BEGIN FORALL i in 1..lrout_curridx SAVE EXCEPTIONS INSERT INTO ROUT VALUES lrout_table(i);
EXCEPTION WHEN dml_errors THEN errors := SQL%BULK_EXCEPTIONS.COUNT; deb('Number of statements that failed: ' || errors);
FOR i IN 1..errors LOOP deb('Error #' || i || ' occurred during '|| 'iteration #' || SQL%BULK_EXCEPTIONS(i).ERROR_INDEX); deb('Error message is ' || SQLERRM(-SQL%BULK_EXCEPTIONS(i).ERROR_CODE));
-- If dup_val_on_index, update the text in the rout table
IF -SQL%BULK_EXCEPTIONS(i).ERROR_CODE = -1 THEN UPDATE ROUT SET rout_text = lrout_table(i).rout_text WHERE db_key = lrout_table(i).db_key AND rsr_key = lrout_table(i).rsr_key AND rout_skey = lrout_table(i).rout_skey AND rout_stamp = lrout_table(i).rout_stamp AND rout_recid = lrout_table(i).rout_recid;
ELSE lrout_curridx := 0; RAISE; END IF;
END LOOP; END;
lrout_curridx := 0; END insertCachedROUT;
PROCEDURE checkRmanOutput( recid IN NUMBER ,stamp IN NUMBER ,session_recid IN NUMBER ,session_stamp IN NUMBER ,rman_status_recid IN NUMBER ,rman_status_stamp IN NUMBER ,output IN VARCHAR2) IS
BEGIN deb('checkRmanOutput', RCVCAT_LEVEL_HI);
-- advance last_rout_stamp if the current record stamp is greater
IF (last_rout_stamp < stamp) THEN last_rout_stamp := stamp; END IF;
IF lrman_status_recid = rman_status_recid AND lrman_status_stamp = rman_status_stamp THEN goto rsr_key_known; END IF;
deb('checkRmanOutput - Find rsr_key');
BEGIN select rsr_key into lrsr_key from rsr where rsr.dbinc_key = this_dbinc_key and ((rsr.site_key = this_site_key) or (rsr.site_key is null AND this_site_key is null)) and rsr.rsr_recid = rman_status_recid and rsr.rsr_stamp = rman_status_stamp;
EXCEPTION WHEN no_data_found THEN -- no command record available - ignore this record
deb('checkRmanOutput - ignoring following RMAN output row'); RETURN; END;
<<rsr_key_known>>
IF lsession_recid = session_recid AND lsession_stamp = session_stamp THEN goto rout_skey_known; END IF;
deb('checkRmanOutput - Find session key');
BEGIN
-- when startup force is done within a single RMAN client session,
-- multiple parent rsr_keys will be created by the client, since the
-- client also knows about the previous server session. This causes the
-- following query to return multiple rows in such cases, so we associate
-- the output with the latest RMAN server session.
select max(rsr_key) into lrout_skey from rsr, dbinc where rsr.dbinc_key = dbinc.dbinc_key and dbinc.db_key = this_db_key and (rsr.site_key = this_site_key or rsr.site_key is null AND this_site_key is null) and rsr.rsr_srecid = session_recid and rsr.rsr_sstamp = session_stamp and rsr.rsr_type = 'SESSION';
EXCEPTION WHEN no_data_found THEN -- no command record available - ignore this record
deb('checkRmanOutput - ignoring following RMAN output row, cause2'); RETURN;
WHEN others THEN deb('checkRmanOutput - signal error for RMAN output row'); RAISE; END;
<<rout_skey_known>>
IF lrout_skey is null THEN -- no command record available - ignore this record
deb('checkRmanOutput: skey not found, ignoring RMAN output row'); RETURN; END IF;
-- Bulk insert into the ROUT table to avoid the performance problem of
-- calling insert for each row.
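-- (The cache is flushed by insertCachedROUT above, which relies on the
-- FORALL ... SAVE EXCEPTIONS idiom: failing rows do not abort the bulk
-- insert; instead ORA-24381 is raised at the end and each failure is
-- described by SQL%BULK_EXCEPTIONS(i).ERROR_INDEX and .ERROR_CODE. An
-- error code of 1 (ORA-00001, unique constraint violated) means the row
-- was resynced before, so its rout_text is updated in place; any other
-- error is re-raised.)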
BEGIN lrout_curridx := lrout_curridx + 1; lrout_table(lrout_curridx).db_key := this_db_key; lrout_table(lrout_curridx).rsr_key := lrsr_key; lrout_table(lrout_curridx).rout_skey := lrout_skey; lrout_table(lrout_curridx).rout_recid := recid; lrout_table(lrout_curridx).rout_stamp := stamp; lrout_table(lrout_curridx).rout_text := substrb(output, 1, krbmror_llength_bytes); -- bug 5906892 -- Once we have cached 1000 rows, do a bulk insert IF lrout_curridx = 1000 THEN insertCachedROUT; END IF; END; lrman_status_recid := rman_status_recid; lrman_status_stamp := rman_status_stamp; lsession_recid := session_recid; lsession_stamp := session_stamp; END checkRmanOutput; PROCEDURE endRmanOutputResync IS BEGIN IF lrout_curridx > 0 THEN insertCachedROUT; END IF; UPDATE node SET high_rout_stamp = last_rout_stamp, inst_startup_stamp = last_inst_startup_stamp WHERE node.db_key = this_db_key and ((this_db_unique_name is null and node.db_unique_name is null) or node.db_unique_name = this_db_unique_name); sessionWaterMarks.high_rout_stamp := last_rout_stamp; last_rout_stamp := NULL; last_inst_startup_stamp := NULL; lrsr_key := NULL; lrout_skey := NULL; lsession_recid := NULL; lsession_stamp := NULL; lrman_status_recid := NULL; lrman_status_stamp := NULL; END endRmanOutputResync; /*----------------------------* * RMAN Status resync * *----------------------------*/ FUNCTION beginRmanStatusResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_rsr_recid INTO last_rsr_recid FROM node WHERE site_key = this_site_key; ELSE last_rsr_recid := sessionWaterMarks.high_rsr_recid; END IF; RETURN last_rsr_recid; END beginRmanStatusResync; PROCEDURE checkRmanStatus( recid IN NUMBER ,stamp IN NUMBER ,parent_recid IN NUMBER ,parent_stamp IN NUMBER ,row_level IN NUMBER ,row_type IN VARCHAR2 ,command_id IN VARCHAR2 ,operation IN VARCHAR2 ,status IN VARCHAR2 ,mbytes_processed IN NUMBER ,start_time IN DATE ,end_time IN DATE ,ibytes IN NUMBER default null ,obytes IN NUMBER default null ,optimized IN VARCHAR2 default null ,otype IN VARCHAR2 default null ,session_recid IN NUMBER default null ,session_stamp IN NUMBER default null ,odevtype IN VARCHAR2 default null ,osb_allocated IN VARCHAR2 default 'NO') IS parent rsr%rowtype; BEGIN IF (last_rsr_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (recid < last_rsr_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (stamp < kccdivts) THEN RETURN; -- obsolete record from a backup controlfile END IF; parent.rsr_pkey := NULL; parent.rsr_l0key := NULL; -- Find the key of the parent (if any). The recid of the level 0 -- (RMAN session) is taken from the parent: if the parent is level 0, then -- we take its key, otherwise key of the level 0 row is in rsr_l0key. 
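-- (Hypothetical example of the hierarchy: a level-0 'SESSION' row has no
-- parent; a level-1 command row under it has rsr_pkey = rsr_l0key = the
-- session's rsr_key; a level-2 step has rsr_pkey pointing at its level-1
-- parent while rsr_l0key still points at the level-0 session row, which
-- is what the decode() in the query below computes.)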
IF (checkRmanStatus.row_level > 0) THEN deb('checkRmanStatus - row_level='||to_char(checkRmanStatus.row_level));
BEGIN SELECT rsr_key, decode(rsr_level, 0, rsr_key, rsr_l0key) INTO parent.rsr_key, parent.rsr_l0key FROM rsr WHERE rsr.dbinc_key = this_dbinc_key AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is null) AND rsr.rsr_stamp = checkRmanStatus.parent_stamp AND rsr.rsr_recid = checkRmanStatus.parent_recid;
EXCEPTION WHEN no_data_found THEN -- no parent record available - ignore this record
deb('checkRmanStatus - ignoring this record'); RETURN; END;
END IF;
BEGIN deb('checkRmanStatus - inserting into rsr'); deb('checkRmanStatus - this_dbinc_key:'||to_char(this_dbinc_key)); deb('checkRmanStatus - recid: '||to_char(recid)); deb('checkRmanStatus - stamp: '||to_char(stamp)); deb('checkRmanStatus - srecid: '||to_char(session_recid)); deb('checkRmanStatus - sstamp: '||to_char(session_stamp));
INSERT INTO rsr (rsr_key, dbinc_key, rsr_recid, rsr_stamp, rsr_pkey, rsr_l0key, rsr_level, rsr_type, rsr_oper, rsr_cmdid, rsr_status, rsr_mbytes, rsr_start, rsr_end, rsr_ibytes, rsr_obytes, rsr_optimized, rsr_otype, rsr_srecid, rsr_sstamp, rsr_odevtype, site_key, rsr_osb_allocated) VALUES (rman_seq.nextval, this_dbinc_key, recid, stamp, parent.rsr_key, parent.rsr_l0key, row_level, row_type, operation, command_id, status, mbytes_processed, start_time, end_time, ibytes, obytes, optimized, otype, session_recid, session_stamp, odevtype, this_site_key, decode(osb_allocated, 'YES', 'Y', 'N'));
-- Clean up the last rsr row, which was inserted when the catalog was not
-- upgraded.
DELETE rsr WHERE rsr.dbinc_key = this_dbinc_key AND rsr.rsr_recid = recid AND rsr.rsr_stamp = stamp AND ((this_site_key is not null AND rsr.site_key is NULL) OR (this_site_key is null and rsr.site_key is not null));
EXCEPTION WHEN dup_val_on_index THEN
-- If the record already exists then just update the data.
deb('checkRmanStatus - exception catch'); deb('checkRmanStatus - this_dbinc_key:'||to_char(this_dbinc_key)); deb('checkRmanStatus - recid: '||to_char(recid)); deb('checkRmanStatus - stamp: '||to_char(stamp)); deb('checkRmanStatus - srecid: '||to_char(session_recid)); deb('checkRmanStatus - sstamp: '||to_char(session_stamp)); UPDATE rsr SET rsr_pkey = parent.rsr_key, rsr_l0key = parent.rsr_l0key, rsr_level = row_level, rsr_type = row_type, rsr_oper = operation, rsr_cmdid = command_id, rsr_status = status, rsr_mbytes = mbytes_processed, rsr_start = start_time, rsr_end = end_time, rsr_ibytes = ibytes, rsr_obytes = obytes, rsr_optimized = optimized, rsr_otype = otype, rsr_odevtype = odevtype, rsr_osb_allocated = decode(osb_allocated, 'YES', 'Y', 'N') WHERE rsr.rsr_stamp = stamp AND rsr.rsr_recid = recid AND (rsr.site_key = this_site_key OR rsr.site_key is null and this_site_key is null) AND rsr.rsr_srecid = session_recid AND rsr.rsr_sstamp = session_stamp AND rsr.dbinc_key = this_dbinc_key; END; END checkRmanStatus; PROCEDURE endRmanStatusResync(recid number) IS BEGIN IF (last_rsr_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (recid < last_rsr_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_rsr_recid = recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_rsr_recid := recid; last_rsr_recid := NULL; END endRmanStatusResync; PROCEDURE updateRmanStatusRow( recid IN number ,stamp IN number ,mbytes IN number ,status IN binary_integer ) IS BEGIN IF (this_dbinc_key IS NULL) THEN return; END IF; UPDATE rsr SET rsr_status = decode(status, 1, 'RUNNING', 1+8, 'RUNNING WITH WARNINGS', 1+16, 'RUNNING WITH ERRORS', 1+8+16, 'RUNNING WITH ERRORS', 2, 'COMPLETED', 2+8, 'COMPLETED WITH WARNINGS', 2+16, 'COMPLETED WITH ERRORS', 2+8+16, 'COMPLETED WITH ERRORS', 'FAILED'), rsr_mbytes = mbytes WHERE rsr.rsr_stamp = stamp AND rsr.rsr_recid = recid AND (rsr.site_key = this_site_key OR rsr.site_key is null AND this_site_key is null) AND rsr.dbinc_key = this_dbinc_key; deb('updateRmanStatusRow - commit, release locks'); commit; END updateRmanStatusRow; /*-------------------* * Change Procedures * *-------------------*/ /* * In these change procedures, we don't check that we found the record, * because we are processing a DL record - i.e. we already processed the * other cf records and hence it might already be flagged deleted. This * applies to any status change because we might make it available and * then delete the object, so the object would be marked as deleted when * we process the object and the available status change (first DL) would * fail to find the object, so would a following DL that marks it deleted. */ PROCEDURE changeDatafileCopy( cdf_recid IN NUMBER ,cdf_stamp IN NUMBER ,status IN VARCHAR2 ,keep_options IN NUMBER DEFAULT NULL -- null means do not update ,keep_until IN DATE DEFAULT NULL ,osite_key IN number DEFAULT NULL -- old site_key for the record ,nsite_key IN number DEFAULT NULL -- null means do not update ) IS local dbinc%rowtype; fno cdf.file#%type; BEGIN IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; -- Determine if this is a controlfile copy or datafile copy. BEGIN -- bug 2719863: Add restriction on DBINC_KEY. 
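-- (Controlfile copies are stored in ccf and datafile copies in cdf, but
-- both share one recid/stamp namespace, so the probe below tries cdf
-- first and, on no_data_found, looks for a ccf row and redirects the call
-- to changeControlfileCopy. Compare checkDataFileCopy above, where
-- file# = 0 denotes the controlfile.)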
SELECT file# into fno FROM cdf WHERE dbinc_key in (select dbinc_key from dbinc where db_key = this_db_key) AND ((osite_key is null AND cdf.site_key is null) OR cdf.site_key = nvl(osite_key, cdf.site_key)) AND cdf.cdf_recid = changeDatafileCopy.cdf_recid AND cdf.cdf_stamp = changeDatafileCopy.cdf_stamp; EXCEPTION WHEN no_data_found THEN BEGIN -- bug 2719863: Add restriction on DBINC_KEY. SELECT 0 into fno FROM ccf WHERE dbinc_key in (select dbinc_key from dbinc where db_key = this_db_key) AND ((osite_key is null AND ccf.site_key is null) OR ccf.site_key = nvl(osite_key, ccf.site_key)) AND ccf.ccf_recid = changeDatafileCopy.cdf_recid AND ccf.ccf_stamp = changeDatafileCopy.cdf_stamp; -- This is a controlfile, so call changeControlfileCopy(). changeControlfileCopy(cdf_recid, cdf_stamp, status, keep_options, keep_until, osite_key, nsite_key); RETURN; EXCEPTION WHEN no_data_found THEN RETURN; -- already deleted (we are processing a DL record) END; WHEN OTHERS THEN RAISE; END; IF status IS NULL THEN -- there is no need to change status so check if need -- to change keep attributes IF keep_until IS NOT NULL THEN UPDATE cdf SET keep_until = changeDatafileCopy.keep_until WHERE cdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND cdf.site_key is null) OR cdf.site_key = nvl(osite_key, cdf.site_key)) AND cdf.cdf_recid = changeDatafileCopy.cdf_recid AND cdf.cdf_stamp = changeDatafileCopy.cdf_stamp; END IF; IF keep_options IS NOT NULL THEN UPDATE cdf SET keep_options = changeDatafileCopy.keep_options WHERE cdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND cdf.site_key is null) OR cdf.site_key = nvl(osite_key, cdf.site_key)) AND cdf.cdf_recid = changeDatafileCopy.cdf_recid AND cdf.cdf_stamp = changeDatafileCopy.cdf_stamp; END IF; ELSIF status IN ('A','U','X') THEN -- 'see above comments for how file status field is changed' -- search on above string to find the relevant comments. UPDATE cdf SET status = changeDatafileCopy.status, site_key = nvl(nsite_key, site_key) WHERE cdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND cdf.site_key is null) OR cdf.site_key = nvl(osite_key, cdf.site_key)) AND cdf.cdf_recid = changeDatafileCopy.cdf_recid AND cdf.cdf_stamp = changeDatafileCopy.cdf_stamp; -- when site ownership is grabbed, delete duplicate entries for this site IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateCDF(cdf_recid, cdf_stamp, null); END IF; ELSIF status IN ('R','D') THEN DELETE FROM cdf WHERE cdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND cdf.site_key is null) OR cdf.site_key = nvl(osite_key, cdf.site_key)) AND cdf.cdf_recid = changeDatafileCopy.cdf_recid AND cdf.cdf_stamp = changeDatafileCopy.cdf_stamp; ELSE raise_application_error(-20100, 'Invalid status'); END IF; -- changeDatafileCopy is an atomic operation -- if called from deleted objects RESYNC do not release lock on db_inc, -- let endCkpt commit all changes. 
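-- (A note on the commit pattern used here and in the change procedures
-- that follow: this_ckp_key is non-NULL while a resync checkpoint is
-- open, in which case the commit is left to endCkpt so that the whole
-- checkpoint is applied atomically; when the procedure is called
-- stand-alone, this_ckp_key is NULL and the change commits, and thereby
-- releases its locks, immediately.)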
IF (this_ckp_key IS NULL) THEN deb('changeDatafileCopy - commit, release locks'); commit; END IF;
END changeDatafileCopy;
PROCEDURE changeControlfileCopy( cdf_recid IN NUMBER ,cdf_stamp IN NUMBER ,status IN VARCHAR2 ,keep_options IN NUMBER DEFAULT NULL -- null means do not update
,keep_until IN DATE DEFAULT NULL ,osite_key IN number DEFAULT NULL -- old site_key for the record
,nsite_key IN number DEFAULT NULL -- null means do not update
) IS local dbinc%rowtype;
BEGIN IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF;
IF status IS NULL THEN
-- there is no need to change the status, so check if we need
-- to change the keep attributes
IF keep_until IS NOT NULL THEN UPDATE ccf SET keep_until = changeControlfileCopy.keep_until WHERE ccf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND ccf.site_key is null) OR ccf.site_key = nvl(osite_key, ccf.site_key)) AND ccf.ccf_recid = changeControlfileCopy.cdf_recid AND ccf.ccf_stamp = changeControlfileCopy.cdf_stamp; END IF;
IF keep_options IS NOT NULL THEN UPDATE ccf SET keep_options = changeControlfileCopy.keep_options WHERE ccf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND ccf.site_key is null) OR ccf.site_key = nvl(osite_key, ccf.site_key)) AND ccf.ccf_recid = changeControlfileCopy.cdf_recid AND ccf.ccf_stamp = changeControlfileCopy.cdf_stamp; END IF;
ELSIF status IN ('A','U','X') THEN
-- 'see above comments for how file status field is changed'
-- search on above string to find the relevant comments.
UPDATE ccf SET status = changeControlfileCopy.status, site_key = nvl(nsite_key, site_key) WHERE ccf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND ccf.site_key is null) OR ccf.site_key = nvl(osite_key,ccf.site_key)) AND ccf.ccf_recid = changeControlfileCopy.cdf_recid AND ccf.ccf_stamp = changeControlfileCopy.cdf_stamp;
-- when site ownership is grabbed, delete duplicate entries for this site
IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateCCF(cdf_recid, cdf_stamp, null); END IF;
ELSIF status IN ('R','D') THEN DELETE FROM ccf WHERE ccf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND ccf.site_key is null) OR ccf.site_key = nvl(osite_key, ccf.site_key)) AND ccf.ccf_recid = changeControlfileCopy.cdf_recid AND ccf.ccf_stamp = changeControlfileCopy.cdf_stamp;
ELSE raise_application_error(-20100, 'Invalid status'); END IF;
-- changeControlfileCopy is an atomic operation
-- if called from deleted objects RESYNC do not release lock on db_inc,
-- let endCkpt commit all changes.
IF (this_ckp_key IS NULL) THEN deb('changeControlfileCopy - commit, release locks'); commit; END IF;
END changeControlfileCopy;
PROCEDURE changeArchivedLog( al_recid IN NUMBER ,al_stamp IN NUMBER ,status IN VARCHAR2 ,osite_key IN NUMBER DEFAULT NULL -- old site_key for the record
,nsite_key IN NUMBER DEFAULT NULL -- null means do not update
) IS
BEGIN IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF;
IF status IN ('A','U','X') THEN
-- 'see above comments for how file status field is changed'
-- search on above string to find the relevant comments.
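-- (Status codes, as implied by the mapping in checkDeletedObject above:
-- 'A' = available, 'U' = unavailable, 'X' = expired; 'D' (deleted) and
-- 'R' both remove the catalog row outright.)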
UPDATE al SET status = changeArchivedLog.status, site_key = nvl(nsite_key, site_key) WHERE al.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND al.al_recid = changeArchivedLog.al_recid AND al.al_stamp = changeArchivedLog.al_stamp AND ((osite_key is null AND al.site_key is null) OR al.site_key = nvl(osite_key, al.site_key)); -- when site ownership is grabbed, delete duplicate entries for this site IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateAL(al_recid, al_stamp, null); END IF; ELSIF status IN ('R','D') THEN -- Bug 1186598 - always delete the row. -- see compatibility change in translateArchivedLogPattern -- and getArchivedLog DELETE FROM al WHERE al.dbinc_key IN (SELECT dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key) AND ((osite_key is null AND al.site_key is null) OR al.site_key = nvl(osite_key, al.site_key)) AND al.al_recid = changeArchivedLog.al_recid AND al.al_stamp = changeArchivedLog.al_stamp; ELSE raise_application_error(-20100, 'Invalid status'); END IF; -- changeArchivedLog is an atomic operation -- if called from deleted objects RESYNC do not release lock on db_inc, -- let endCkpt commit all changes. IF (this_ckp_key IS NULL) THEN deb('changeArchivedLog - commit, release locks'); commit; END IF; END changeArchivedLog; PROCEDURE changeBackupSet( recid IN number ,stamp IN number ,keep_options IN number -- null means do not update ,keep_until IN date ,osite_key IN number DEFAULT NULL -- old site_key for the record ,nsite_key IN number DEFAULT NULL -- null means do not update ) IS local bs%rowtype; CURSOR dflist IS SELECT * FROM bdf WHERE bdf.bs_key = local.bs_key; CURSOR rllist IS SELECT * FROM brl WHERE brl.bs_key = local.bs_key; BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; BEGIN SELECT * INTO local FROM bs WHERE bs.db_key = this_db_key AND bs.bs_recid = changeBackupSet.recid AND bs.bs_stamp = changeBackupSet.stamp; EXCEPTION WHEN NO_DATA_FOUND THEN RETURN; -- already deleted (we are processing a DL record) END; IF keep_until IS NOT NULL THEN UPDATE bs SET bs.keep_until = changeBackupSet.keep_until WHERE bs.bs_key = local.bs_key; END IF; IF keep_options IS NOT NULL THEN UPDATE bs SET bs.keep_options = changeBackupSet.keep_options WHERE bs.bs_key = local.bs_key; -- Find the datafile or redo log backups and adjust restore point as needed IF (local.bck_type = 'L') THEN FOR rlrec IN rllist LOOP updateRestorePoint(rlrec.low_scn, rlrec.next_scn); END LOOP; END IF; IF (local.bck_type = 'D') THEN FOR dfrec IN dflist LOOP updateRestorePoint(dfrec.ckp_scn, null); END LOOP; END IF; END IF; IF nsite_key is not null THEN UPDATE bs SET site_key = nsite_key WHERE bs_key = local.bs_key; UPDATE bp SET site_key = nsite_key WHERE bs_key = local.bs_key; END IF; -- changeBackupSet is an atomic operation -- if called from deleted objects RESYNC do not release lock on db_inc, -- let endCkpt commit all changes. 
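-- (The keep handling above revisits every member of the set: datafile
-- backups by their checkpoint SCN and archived log backups by their
-- low/next SCN range, letting updateRestorePoint refresh the restore
-- point bookkeeping for the SCNs that must remain recoverable.)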
IF (this_ckp_key IS NULL) THEN deb('changeBackupSet - commit, release locks'); commit; END IF;
END changeBackupSet;
PROCEDURE changeBackupPiece( bp_recid IN NUMBER ,bp_stamp IN NUMBER ,status IN VARCHAR2 ,set_stamp IN NUMBER DEFAULT NULL ,set_count IN NUMBER DEFAULT NULL ,osite_key IN number DEFAULT NULL -- old site_key for the record
,nsite_key IN number DEFAULT NULL -- null means do not update
) IS
CURSOR bsQ IS SELECT bs_key FROM bp WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp;
bsQrec bsQ%ROWTYPE; totalbp number; chgbskey number := NULL;
BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF;
-- If set_stamp and set_count are valid, then find out the unique
-- backup set that this change is referring to.
IF (set_stamp is not null AND set_count is not null) THEN
BEGIN SELECT bs_key INTO chgbskey FROM bs WHERE bs.db_key = this_db_key AND bs.set_stamp = changeBackupPiece.set_stamp AND bs.set_count = changeBackupPiece.set_count;
EXCEPTION WHEN NO_DATA_FOUND THEN RETURN; -- already deleted (we are processing a DL record)
END;
deb('changeBackupPiece - chgbskey=' || chgbskey);
ELSE
-- Bug-4531791:
-- If set_stamp and set_count are NULL, then it is a record that was
-- created in a pre-11g version. We cannot identify the backup set
-- uniquely, so we have to live with deleting all the backup pieces that
-- match bp_recid and bp_stamp.
SELECT count(*) INTO totalbp FROM bp WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp AND bp.bs_key = nvl(chgbskey, bp.bs_key);
deb('changeBackupPiece - number of backupsets match ' || totalbp);
IF totalbp = 0 then RETURN; -- already deleted (we are processing a DL record)
END IF;
END IF;
IF status in ('A','U','X') THEN
-- 'see above comments for how file status field is changed'
-- search on above string to find the relevant comments.
UPDATE bp SET status = changeBackupPiece.status, site_key = nvl(nsite_key, site_key) WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp AND bp.bs_key = nvl(chgbskey, bp.bs_key);
-- when site ownership is grabbed, delete duplicate entries for this site
IF sql%rowcount > 0 and nsite_key is not null THEN
-- clear site_key in the backup set record, so that it is re-calculated
-- by the updateBackupSetRec procedure
IF chgbskey is not null THEN UPDATE bs SET site_key=null WHERE bs_key = chgbskey;
ELSE UPDATE bs SET site_key=null WHERE bs_key in (SELECT bs_key FROM bp WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp);
END IF;
deleteDuplicateBP(bp_recid, bp_stamp, chgbskey, null, null); END IF;
ELSIF status not in ('R', 'D') THEN raise_application_error(-20100, 'Invalid status'); END IF;
IF (chgbskey IS NULL) THEN FOR bsQrec in bsQ LOOP
-- bsQrec cursor will not return any row if you delete before
-- opening the cursor.
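-- (For such pre-11g records the owning bs_key is unknown, so every backup
-- set owning a piece with this recid/stamp is visited here: the piece is
-- deleted first, and updateBackupSetRec then revalidates whatever is left
-- of that set.)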
IF status in ('R', 'D') THEN DELETE FROM bp WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp AND bp.bs_key = bsQrec.bs_key; END IF; -- revalidate the backup set updateBackupSetRec(bsQrec.bs_key); END LOOP; ELSE IF status in ('R', 'D') THEN DELETE FROM bp WHERE bp.db_key = this_db_key AND ((osite_key is null AND bp.site_key is null) OR bp.site_key = nvl(osite_key, bp.site_key)) AND bp.bp_recid = changeBackupPiece.bp_recid AND bp.bp_stamp = changeBackupPiece.bp_stamp AND bp.bs_key = chgbskey; END IF; updateBackupSetRec(chgbskey); END IF; -- changeBackupPiece is an atomic operation -- if called from deleted objects RESYNC do not release lock on db_inc, -- let endCkpt commit all changes. IF (this_ckp_key IS NULL) THEN deb('changeBackupPiece - commit, release locks'); commit; END IF; END changeBackupPiece; PROCEDURE changeProxyCopy( pc_recid IN NUMBER ,pc_stamp IN NUMBER ,status IN VARCHAR2 ,keep_options IN NUMBER DEFAULT NULL -- null means do not update ,keep_until IN DATE DEFAULT NULL ,osite_key IN number DEFAULT NULL -- old site_key for the record ,nsite_key IN number DEFAULT NULL -- null means do not update ) IS low_scn number; next_scn number; xobjid rowid; -- proxy object rowid BEGIN IF this_db_key IS NULL THEN raise_application_error(-20021, 'Database not set'); END IF; IF status IS NULL THEN -- There is no need to change status so check if need -- to change keep stuff. IF keep_until IS NOT NULL THEN UPDATE xdf SET xdf.keep_until = changeProxyCopy.keep_until WHERE xdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xdf.site_key is null) OR xdf.site_key = nvl(osite_key, xdf.site_key)) AND xdf.xdf_recid = changeProxyCopy.pc_recid AND xdf.xdf_stamp = changeProxyCopy.pc_stamp; -- If it wasn't a proxy datafile, maybe it's a proxy controlfile. IF sql%rowcount = 0 THEN UPDATE xcf SET xcf.keep_until = changeProxyCopy.keep_until WHERE xcf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xcf.site_key is null) OR xcf.site_key = nvl(osite_key, xcf.site_key)) AND xcf.xcf_recid = changeProxyCopy.pc_recid AND xcf.xcf_stamp = changeProxyCopy.pc_stamp; END IF; END IF; IF keep_options IS NOT NULL THEN SELECT min(ckp_scn), min(rowid) into low_scn, xobjid FROM xdf WHERE xdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xdf.site_key is null) OR xdf.site_key = nvl(osite_key, xdf.site_key)) AND xdf.xdf_recid = changeProxyCopy.pc_recid AND xdf.xdf_stamp = changeProxyCopy.pc_stamp; -- If it was a datafile, check for restore points and update the row IF xobjid IS NOT NULL THEN updateRestorePoint(low_scn, null); UPDATE xdf SET xdf.keep_options = changeProxyCopy.keep_options WHERE rowid = xobjid; ELSE -- If it was not a proxy datafile, maybe it is a proxy controlfile. UPDATE xcf SET xcf.keep_options = changeProxyCopy.keep_options WHERE xcf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xcf.site_key is null) OR xcf.site_key = nvl(osite_key, xcf.site_key)) AND xcf.xcf_recid = changeProxyCopy.pc_recid AND xcf.xcf_stamp = changeProxyCopy.pc_stamp; -- If it was not a proxy controlfile, maybe it is a proxy archivelog. 
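-- (changeProxyCopy cannot tell from the recid alone which proxy type it
-- is changing, so each branch probes in a fixed order: xdf (datafile),
-- then xcf (controlfile), then xal (archived log), using sql%rowcount or
-- a min(rowid) sentinel to detect whether the previous probe matched.)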
IF sql%rowcount = 0 THEN SELECT min(xal.low_scn), min(xal.next_scn), min(rowid) into low_scn, next_scn, xobjid FROM xal WHERE xal.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xal.site_key is null) OR xal.site_key = nvl(osite_key, xal.site_key)) AND xal.xal_recid = changeProxyCopy.pc_recid AND xal.xal_stamp = changeProxyCopy.pc_stamp; -- Handle restore points and update the proxy archivelog IF xobjid IS NOT NULL THEN updateRestorePoint(low_scn, next_scn); UPDATE xal SET xal.keep_options = changeProxyCopy.keep_options WHERE rowid = xobjid; END IF; END IF; END IF; END IF; ELSIF status in ('A','U','X') THEN -- 'see above comments for how file status field is changed' -- search on above string to find the relevant comments. UPDATE xdf SET status = changeProxyCopy.status, site_key = nvl(nsite_key, site_key) WHERE xdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xdf.site_key is null) OR xdf.site_key = nvl(osite_key, xdf.site_key)) AND xdf.xdf_recid = changeProxyCopy.pc_recid AND xdf.xdf_stamp = changeProxyCopy.pc_stamp; -- when site ownership is grabbed, delete duplicate entries for this site IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateXDF(pc_recid, pc_stamp, null, null); END IF; -- if it was not a proxy datafile, maybe it is a proxy controlfile IF sql%rowcount = 0 THEN UPDATE xcf SET status = changeProxyCopy.status, site_key = nvl(nsite_key, site_key) WHERE xcf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xcf.site_key is null) OR xcf.site_key = nvl(osite_key, xcf.site_key)) AND xcf.xcf_recid = changeProxyCopy.pc_recid AND xcf.xcf_stamp = changeProxyCopy.pc_stamp; -- when site ownership is grabbed, delete duplicate entries for this site IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateXCF(pc_recid, pc_stamp, null, null); END IF; END IF; -- if it was not a proxy controlfile, maybe it is a proxy archivedlog IF sql%rowcount = 0 THEN UPDATE xal SET status = changeProxyCopy.status, site_key = nvl(nsite_key, site_key) WHERE xal.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xal.site_key is null) OR xal.site_key = nvl(osite_key, xal.site_key)) AND xal.xal_recid = changeProxyCopy.pc_recid AND xal.xal_stamp = changeProxyCopy.pc_stamp; -- when site ownership is grabbed, delete duplicate entries for this site IF sql%rowcount > 0 and nsite_key is not null THEN deleteDuplicateXAL(pc_recid, pc_stamp, null, null); END IF; END IF; ELSIF status IN ('R','D') THEN -- Get the checkpoint change# for restore points only if keep SELECT min(ckp_scn), min(rowid) into low_scn, xobjid FROM xdf WHERE xdf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xdf.site_key is null) OR xdf.site_key = nvl(osite_key, xdf.site_key)) AND xdf.xdf_recid = changeProxyCopy.pc_recid AND xdf.xdf_stamp = changeProxyCopy.pc_stamp; -- Delete datafile IF xobjid IS NOT NULL THEN updateRestorePoint(low_scn, null); DELETE FROM xdf WHERE rowid = xobjid; ELSE -- if it was not a proxy datafile, maybe it is a proxy controlfile DELETE FROM xcf WHERE xcf.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xcf.site_key is null) OR xcf.site_key = nvl(osite_key, xcf.site_key)) AND xcf.xcf_recid = changeProxyCopy.pc_recid AND xcf.xcf_stamp = changeProxyCopy.pc_stamp; -- if it was not a 
proxy controlfile, maybe it is a proxy archivedlog IF sql%rowcount = 0 THEN SELECT min(xal.low_scn), min(xal.next_scn), min(rowid) into low_scn, next_scn, xobjid FROM xal WHERE xal.dbinc_key in (select dbinc_key from dbinc where dbinc.db_key = this_db_key) AND ((osite_key is null AND xal.site_key is null) OR xal.site_key = nvl(osite_key, xal.site_key)) AND xal.xal_recid = changeProxyCopy.pc_recid AND xal.xal_stamp = changeProxyCopy.pc_stamp; IF xobjid IS NOT NULL THEN updateRestorePoint(low_scn, next_scn); DELETE FROM xal WHERE rowid = xobjid; END IF; END IF; END IF; ELSE raise_application_error(-20100, 'Invalid status'); END IF; -- changeProxyCopy is an atomic operation -- if called from deleted objects RESYNC do not release lock on db_inc, -- let endCkpt commit all changes. IF (this_ckp_key IS NULL) THEN deb('changeProxyCopy - commit, release locks'); commit; END IF; END changeProxyCopy; /*----------------------------* * Stored Script Procedures * *----------------------------*/ PROCEDURE createScript(name IN VARCHAR2) IS BEGIN createScript(name, NULL, FALSE); END; PROCEDURE createScript(name IN VARCHAR2, scr_com IN VARCHAR2, global IN boolean) IS foo NUMBER; dbkey NUMBER := this_db_key; BEGIN scr_key := NULL; -- for safety IF global THEN dbkey := NULL; scr_glob := TRUE; ELSE scr_glob := FALSE; IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; END IF; SELECT count(*) INTO foo FROM scr WHERE ((dbkey is not null and scr.db_key = dbkey) OR (dbkey is null and scr.db_key is null)) AND scr.scr_name = createScript.name; IF foo > 0 THEN raise_application_error(-20401, 'script '||name||' already exists'); END IF; INSERT INTO scr VALUES(rman_seq.nextval, dbkey, name, scr_com); SELECT rman_seq.currval INTO scr_key FROM dual; scr_line := 1; -- createScript is an atomic operation deb('createScript - commit, release locks'); commit; END; PROCEDURE replaceScript(name IN VARCHAR2) IS BEGIN replaceScript(name, NULL, FALSE); END; PROCEDURE replaceScript(name IN VARCHAR2, scr_com IN VARCHAR2, global IN boolean) IS dbkey NUMBER := this_db_key; BEGIN IF global THEN dbkey := NULL; scr_glob := TRUE; ELSE scr_glob := FALSE; IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; END IF; SELECT scr_key INTO scr_key FROM scr WHERE ((dbkey is not null and scr.db_key = dbkey) OR (dbkey is null and scr.db_key is null)) AND scr.scr_name = replaceScript.name; UPDATE scr SET scr_comment = scr_com WHERE scr.scr_key = dbms_rcvcat.scr_key; DELETE FROM scrl WHERE scrl.scr_key = dbms_rcvcat.scr_key; scr_line := 1; -- replaceScript is an atomic operation. 
deb('replaceScript - commit, release locks'); commit; EXCEPTION WHEN NO_DATA_FOUND THEN createScript(name, scr_com, global); END; PROCEDURE putLine(line IN VARCHAR2) IS BEGIN IF not scr_glob and this_db_key IS NULL THEN raise_application_error(-20021, 'Database not set'); END IF; IF (scr_key IS NULL) THEN raise_application_error(-20402, 'createScript or replaceScript not done'); END IF; INSERT INTO scrl(scr_key, linenum, text) VALUES(scr_key, scr_line, line); scr_line := scr_line + 1; END; PROCEDURE deleteScript(name IN VARCHAR2) IS BEGIN deleteScript(name, 0); END; PROCEDURE deleteScript(name IN VARCHAR2, glob IN NUMBER) IS dbkey NUMBER := this_db_key; BEGIN IF glob = 1 THEN dbkey := NULL; ELSE IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; END IF; SELECT scr_key INTO scr_key FROM scr WHERE ((dbkey is not null and scr.db_key = dbkey) OR (dbkey is null and scr.db_key is null)) AND scr.scr_name = deleteScript.name; DELETE FROM scr WHERE scr.scr_key = dbms_rcvcat.scr_key; scr_key := NULL; -- deleteScript is an atomic operation. deb('deleteScript - commit, release locks'); commit; EXCEPTION WHEN NO_DATA_FOUND THEN scr_key := NULL; raise_application_error(-20400, 'stored script not found'); END; PROCEDURE getScript(name IN VARCHAR2) IS BEGIN getScript(name, 0); END; PROCEDURE getScript(name IN VARCHAR2, glob IN NUMBER) IS dbkey NUMBER := this_db_key; BEGIN IF glob = 1 THEN dbkey := NULL; scr_glob := TRUE; ELSE scr_glob := FALSE; IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; END IF; SELECT scr_key INTO scr_key FROM scr WHERE ((dbkey is not null and scr.db_key = dbkey) OR (dbkey is null and scr.db_key is null)) AND scr.scr_name = getScript.name; IF scrlQ%ISOPEN THEN CLOSE scrlQ; END IF; OPEN scrlQ(scr_key); EXCEPTION WHEN NO_DATA_FOUND THEN scr_key := NULL; raise_application_error(-20400, 'stored script not found'); END; FUNCTION getLine RETURN VARCHAR2 IS scrl_row scrlQ%rowtype; BEGIN IF not scr_glob and this_db_key IS NULL THEN raise_application_error(-20021, 'Database not set'); END IF; IF NOT scrlQ%ISOPEN THEN raise_application_error(-20403, 'getScript not done'); END IF; FETCH scrlQ INTO scrl_row; IF scrlQ%NOTFOUND THEN -- end of fetch close scrlQ; return NULL; END IF; RETURN scrl_row.text; END; PROCEDURE commitChanges IS BEGIN deb('commitChanges - commit, release locks'); commit; END; -- version info -- Return all the protocol versions that we support, one at a time. -- Return them in ascending version number order. 
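-- As an illustration (hypothetical caller; names and the driving loop are
-- assumed for the example, the RMAN client performs the equivalent calls):
--
--   LOOP
--     v := getPackageVersion;
--     EXIT WHEN v IS NULL;   -- list exhausted; the counter resets for the
--                            -- next caller
--     deb('client may use protocol version ' || v);
--   END LOOP;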
FUNCTION getPackageVersion RETURN VARCHAR2 IS BEGIN if version_counter > version_max_index then version_counter := 1; return null; end if; version_counter := version_counter + 1; return version_list(version_counter - 1); END; FUNCTION getCatalogVersion RETURN VARCHAR2 IS version rcver.version%type; BEGIN IF NOT rcverQ%ISOPEN THEN open rcverQ; END IF; FETCH rcverQ into version; IF rcverQ%NOTFOUND THEN -- end of fetch close rcverQ; return NULL; END IF; RETURN version; END; /*---------------------------------------* * Procedures for clone database support * *---------------------------------------*/ PROCEDURE setCloneName(file# IN NUMBER ,creation_change# IN NUMBER ,new_clone_fname IN VARCHAR2 ,old_clone_fname IN VARCHAR2 ,changedauxname OUT boolean ,plugin_change# IN NUMBER DEFAULT 0) IS lfname df.clone_fname%TYPE; BEGIN deb('setCloneName: file#=' || to_char(file#)|| ', creation_change#=' || to_char(nvl(creation_change#, ''))|| ', plugin_change#=' || to_char(nvl(plugin_change#, ''))|| ', old_clone_fname=' || old_clone_fname || ', new_clone_fname=' || new_clone_fname); changedauxname := FALSE; -- Note: if the target database has just been upgraded to 9.0 then -- clone_fname is unknown and we will not change the recovery catalog. IF (new_clone_fname = 'UNKNOWN') THEN RETURN; END IF; IF old_clone_fname is NULL THEN IF new_clone_fname = 'NONE' THEN -- No new auxname, just return RETURN; ELSE lfname := new_clone_fname; END IF; ELSE IF new_clone_fname = 'NONE' THEN lfname := NULL; ELSIF old_clone_fname = new_clone_fname THEN -- We already have the auxname, just return RETURN; ELSE lfname := new_clone_fname; END IF; END IF; UPDATE df SET df.clone_fname = lfname WHERE df.dbinc_key = this_dbinc_key AND df.file# = setCloneName.file# AND df.create_scn = setCloneName.creation_change# AND df.plugin_scn = setCloneName.plugin_change#; changedauxname := TRUE; deb('setCloneName - changed auxname for file# '||to_char(file#)|| ' from '||nvl(old_clone_fname, 'NULL')||' to '|| nvl(lfname, 'NULL')); EXCEPTION WHEN NO_DATA_FOUND THEN raise_application_error(-20105, 'datafile missing'); END; -- We need this function in the RCVCAT PL/SQL package because recover.txt -- cannot access the Recovery Catalog RCVMAN PL/SQL package. FUNCTION getCloneName( file# IN NUMBER ,creation_change# IN NUMBER ,plugin_change# IN NUMBER DEFAULT 0) RETURN VARCHAR2 IS ret df.clone_fname%TYPE; BEGIN -- call getCloneName from the rcvman package. Note that in this case we -- call the recovery catalog version!
ret := dbms_rcvman.getCloneName(file#, creation_change#, plugin_change#); RETURN ret; EXCEPTION WHEN NO_DATA_FOUND THEN raise_application_error(-20105, 'datafile missing'); END; /*-----------------------------------* * Procedures for RMAN configuration * *-----------------------------------*/ -- 9i setConfig PROCEDURE setConfig(conf# IN NUMBER ,name IN VARCHAR2 ,value IN VARCHAR2) IS BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; INSERT INTO conf( db_key, conf#, name, value, cleanup, db_unique_name, site_key) VALUES(this_db_key, conf#, name, value, 'YES', NULL, 0); EXCEPTION WHEN dup_val_on_index THEN UPDATE conf SET conf.name = name, conf.value = value WHERE conf.conf# = conf# AND conf.db_key = this_db_key; RETURN; END; -- setConfig used by 10i databases PROCEDURE setConfig2(conf# IN NUMBER ,name IN VARCHAR2 ,value IN VARCHAR2 ,nodespec IN BOOLEAN) IS lname conf.name%TYPE; lvalue conf.value%TYPE; BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; -- Now, add the configuration itself. Generic configurations have -- a db_unique_name value of NULL and site_key of 0. IF (nodespec) THEN INSERT INTO conf( db_key, conf#, name, value, cleanup, db_unique_name, site_key) VALUES(this_db_key, conf#, name, value, 'NO', this_db_unique_name, this_site_key); ELSE INSERT INTO conf( db_key, conf#, name, value, cleanup, db_unique_name, site_key) VALUES(this_db_key, conf#, name, value, 'NO', NULL, 0); END IF; deb('setConfig - Added name=(' || name || '), value=(' || value || ') to node ' || this_db_unique_name || '('|| conf# ||')'); EXCEPTION -- We should never have a record that would have different -- (name, value). Assert the condition. WHEN dup_val_on_index THEN select name, value into lname, lvalue from conf where conf.conf# = setConfig2.conf# AND conf.db_key = this_db_key AND db_unique_name = this_db_unique_name; IF (lname = name AND lvalue = value) THEN RETURN; END IF; deb('setConfig - lname=' || lname || ', lvalue=' || lvalue); RAISE; WHEN others THEN deb('setConfig - this_db_unique_name='||this_db_unique_name|| ', conf#='||conf#); RAISE; END; -- setConfig3 used by 11i RMAN client to set remote site specific -- configurations directly in the recovery catalog. FUNCTION setConfig3(name IN VARCHAR2 ,value IN VARCHAR2 ,db_unique_name IN VARCHAR2) RETURN NUMBER IS lname conf.name%TYPE NOT NULL := name; lvalue conf.value%TYPE NOT NULL := value; ldbuname conf.db_unique_name%TYPE NOT NULL := upper(db_unique_name); lsite_key NUMBER; lconf_key NUMBER; BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; deb('setConfig3 - Remote setConfig for '||ldbuname); SELECT site_key into lsite_key from node where node.db_unique_name = ldbuname AND node.db_key = this_db_key; deb('setConfig3 - remote_site_key='||lsite_key); SELECT rman_seq.nextval INTO lconf_key FROM dual; -- Now, add the configuration itself. Generic configurations have -- a db_unique_name value of NULL and site_key of 0.
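-- For example (illustrative rows; the key values are made up): a generic
-- CONFIGURE lands in conf as (db_unique_name => NULL, site_key => 0),
-- while CONFIGURE ... FOR DB_UNIQUE_NAME BOSTON lands as
-- (db_unique_name => 'BOSTON', site_key => BOSTON's node.site_key).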
INSERT INTO conf( db_key, conf#, name, value, cleanup, db_unique_name, site_key) VALUES(this_db_key, lconf_key, name, value, 'NO', ldbuname, lsite_key); UPDATE node SET node.force_resync2cf = 'YES' WHERE node.db_key = this_db_key AND node.db_unique_name = ldbuname; deb('setConfig3 - commit, release locks'); COMMIT; deb('setConfig3 - Added name=(' || lname || '), value=(' || lvalue || ') to node ' || ldbuname || '('|| lconf_key ||')'); RETURN lconf_key; EXCEPTION WHEN OTHERS THEN deb('setConfig3 - rollback all, release locks'); ROLLBACK; RAISE; END; -- deleteConfig3 used by 11i RMAN client to delete remote site specific -- configurations directly in the recovery catalog. PROCEDURE deleteConfig3(conf# IN NUMBER ,db_unique_name IN VARCHAR2) IS BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; -- Must delete one or zero records, otherwise it is an internal error. -- Zero records will be deleted when getConfig returns configurations -- from the primary database. DELETE conf WHERE conf.db_key = this_db_key AND conf.db_unique_name = deleteConfig3.db_unique_name AND conf.conf# = deleteConfig3.conf#; IF sql%rowcount <> 1 AND sql%rowcount <> 0 THEN raise_application_error(-20999, 'Internal error in deleteConfig3, deleted rows= ' || sql%rowcount); END IF; UPDATE node SET node.force_resync2cf = 'YES' WHERE node.db_key = this_db_key AND node.db_unique_name = deleteconfig3.db_unique_name; deb('deleteConfig3 - commit, release locks'); COMMIT; EXCEPTION WHEN OTHERS THEN deb('deleteConfig3 - rollback all, release locks'); ROLLBACK; RAISE; END; PROCEDURE resetConfig IS BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; DELETE conf WHERE conf.db_key = this_db_key; EXCEPTION WHEN NO_DATA_FOUND THEN -- if db_key is not found, ignore this operation RETURN; END resetConfig; PROCEDURE resetConfig2 (nodespec IN BOOLEAN, high_conf_recid IN NUMBER DEFAULT NULL) IS BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; -- The configuration rows from previous oracle versions have -- cleanup set to 'YES'. These rows should be deleted - regardless of -- whether they are generic or node specific configurations. DELETE conf WHERE conf.db_key = this_db_key AND conf.cleanup = 'YES'; -- If nodespec is TRUE, then we will delete only configurations -- that match our db_unique_name - these are node specific -- configurations. IF (nodespec) THEN DELETE conf WHERE conf.db_key = this_db_key AND conf.db_unique_name = this_db_unique_name; ELSE -- If we are deleting generic (not node specific), then we should -- force resync from recovery catalog to control file for all -- other databases. force_resync2cf := 'YES'; DELETE conf WHERE conf.db_key = this_db_key AND conf.db_unique_name IS NULL; END IF; IF high_conf_recid IS NOT NULL THEN last_conf_recid := high_conf_recid; deb('resetConfig2 - updated last_conf_recid=' || last_conf_recid); END IF; EXCEPTION WHEN NO_DATA_FOUND THEN -- if no configurations to delete, ignore this operation RETURN; END resetConfig2; PROCEDURE deleteConfig(conf# IN NUMBER) IS BEGIN -- This function is not called in 9.2/10.2 onwards. Not sure when/why -- this function was introduced. It would be safe to assert in this function. raise_application_error(-20999, 'Internal error in deleteConfig should not be called '); END; /*-------------------------* * Catalog upgrade support * *-------------------------*/ /* NOTES: * * These procedures *must* tolerate being called *before* dbinc_key * has been set.
*/ /*-------------------* * Utility functions * *-------------------*/ PROCEDURE bsStatusRecalc(status IN varchar2) IS cursor bsQ(status varchar2) IS SELECT bs_key FROM bs WHERE bs.status = bsStatusRecalc.status -- bug 6055481 AND bs.db_key = this_db_key; bsQrec bsQ%ROWTYPE; BEGIN FOR bsQrec in bsQ(status) LOOP updateBackupSetRec(bsQrec.bs_key); END LOOP; END; PROCEDURE reNormalize(newname IN varchar2, oldname OUT varchar2) IS BEGIN IF newname IS NULL THEN -- initialize IF reNorm_dfatt_c%ISOPEN THEN CLOSE reNorm_dfatt_c; END IF; IF reNorm_orl_c%ISOPEN THEN CLOSE reNorm_orl_c; END IF; IF reNorm_al_c%ISOPEN THEN CLOSE reNorm_al_c; END IF; IF reNorm_bp_c%ISOPEN THEN CLOSE reNorm_bp_c; END IF; IF reNorm_ccf_c%ISOPEN THEN CLOSE reNorm_ccf_c; END IF; IF reNorm_cdf_c%ISOPEN THEN CLOSE reNorm_cdf_c; END IF; IF reNorm_tfatt_c%ISOPEN THEN CLOSE reNorm_tfatt_c; END IF; reNorm_state := RENORM_DFATT; ELSE -- update the previous row IF reNorm_state = RENORM_DFATT THEN UPDATE site_dfatt SET fname = newname WHERE CURRENT OF reNorm_dfatt_c; ELSIF reNorm_state = RENORM_ORL THEN UPDATE orl SET fname = newname WHERE CURRENT OF reNorm_orl_c; ELSIF reNorm_state = RENORM_AL THEN UPDATE al SET fname = newname, fname_hashkey = substr(newname,1,10) || substr(newname, -10) WHERE CURRENT OF reNorm_al_c; ELSIF reNorm_state = RENORM_BP THEN UPDATE bp SET handle = newname, handle_hashkey = substr(device_type,1,10) || substr(newname,1,10) || substr(newname,-10) WHERE CURRENT OF reNorm_bp_c; ELSIF reNorm_state = RENORM_CCF THEN UPDATE ccf SET fname = newname, fname_hashkey = substr(newname,1,10) || substr(newname, -10) WHERE CURRENT OF reNorm_ccf_c; ELSIF reNorm_state = RENORM_CDF THEN UPDATE cdf SET fname = newname, fname_hashkey = substr(newname,1,10) || substr(newname, -10) WHERE CURRENT OF reNorm_cdf_c; ELSIF reNorm_state = RENORM_TFATT THEN UPDATE site_tfatt SET fname = newname WHERE CURRENT OF reNorm_tfatt_c; END IF; END IF; IF reNorm_state = RENORM_DFATT THEN IF NOT reNorm_dfatt_c%ISOPEN THEN OPEN reNorm_dfatt_c; END IF; FETCH reNorm_dfatt_c INTO oldname; IF reNorm_dfatt_c%NOTFOUND THEN CLOSE reNorm_dfatt_c; reNorm_state := RENORM_ORL; END IF; END IF; IF reNorm_state = RENORM_ORL THEN IF NOT reNorm_orl_c%ISOPEN THEN OPEN reNorm_orl_c; END IF; FETCH reNorm_orl_c INTO oldname; IF reNorm_orl_c%NOTFOUND THEN CLOSE reNorm_orl_c; reNorm_state := RENORM_AL; END IF; END IF; IF reNorm_state = RENORM_AL THEN IF NOT reNorm_al_c%ISOPEN THEN OPEN reNorm_al_c; END IF; FETCH reNorm_al_c INTO oldname; IF reNorm_al_c%NOTFOUND THEN CLOSE reNorm_al_c; reNorm_state := RENORM_BP; END IF; END IF; IF reNorm_state = RENORM_BP THEN IF NOT reNorm_bp_c%ISOPEN THEN OPEN reNorm_bp_c; END IF; FETCH reNorm_bp_c INTO oldname; IF reNorm_bp_c%NOTFOUND THEN CLOSE reNorm_bp_c; reNorm_state := RENORM_CCF; END IF; END IF; IF reNorm_state = RENORM_CCF THEN IF NOT reNorm_ccf_c%ISOPEN THEN OPEN reNorm_ccf_c; END IF; FETCH reNorm_ccf_c INTO oldname; IF reNorm_ccf_c%NOTFOUND THEN CLOSE reNorm_ccf_c; reNorm_state := RENORM_CDF; END IF; END IF; IF reNorm_state = RENORM_CDF THEN IF NOT reNorm_cdf_c%ISOPEN THEN OPEN reNorm_cdf_c; END IF; FETCH reNorm_cdf_c INTO oldname; IF reNorm_cdf_c%NOTFOUND THEN CLOSE reNorm_cdf_c; reNorm_state := RENORM_TFATT; END IF; END IF; IF reNorm_state = RENORM_TFATT THEN IF NOT reNorm_tfatt_c%ISOPEN THEN OPEN reNorm_tfatt_c; END IF; FETCH reNorm_tfatt_c INTO oldname; IF reNorm_tfatt_c%NOTFOUND THEN CLOSE reNorm_tfatt_c; reNorm_state := NULL; oldname := NULL; deb('reNormalize - commit, release locks'); commit; END IF; END IF; END 
reNormalize; -- The sanityCheck procedure can be used for any cleaning up of the recovery -- catalog that can be done solely by examination of the recovery catalog -- itself. It is the last thing that is done during resync, before commit. -- Forward declaration of functions and procedures used by sanityCheck PROCEDURE cleanupResyncedBS; PROCEDURE cleanupCKP; PROCEDURE cleanupRLH; PROCEDURE cleanupRSR; PROCEDURE cleanupBV; PROCEDURE cleanupROUT; PROCEDURE cleanupNRS; PROCEDURE sanityCheck IS BEGIN cleanupResyncedBS; cleanupCKP; cleanupRLH; cleanupRSR; cleanupBV; cleanupROUT; cleanupNRS; END sanityCheck; PROCEDURE cleanupResyncedBS IS cnt number; BEGIN -- Check that no backup sets have just been resynced -- for which there are no pieces. This can happen when backup sets are -- created in advance of the pieces and later on, no backup piece is actually -- inserted. deb('cleanupResyncedBS - cntbs='||cntbs); IF cntbs is NULL THEN raise_application_error(-20107, 'invalid bskey counter'); END IF; FOR i IN 1 .. cntbs LOOP SELECT count(*) into cnt from bs where bs_key = updatebs(i); IF cnt > 0 THEN deb('cleanupResyncedBS - updating bs_key='||updatebs(i)); updateBackupSetRec(updatebs(i)); END IF; END LOOP; cntbs := 0; END cleanupResyncedBS; PROCEDURE cleanupCKP IS scn NUMBER; seq NUMBER; keep_ckp_key_1 NUMBER; keep_ckp_key_2 NUMBER; start_time DATE := sysdate; BEGIN -- Remove unneeded rows from the CKP table. We can remove all rows EXCEPT: -- The rows that are referenced by foreign key columns from other tables. -- For the query in beginckpt, the row with the highest ckp_key. -- The one or two rows that are needed by getCheckpoint. IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; dbms_rcvman.getCheckpoint(scn, seq, keep_ckp_key_1, keep_ckp_key_2); deb('cleanupCKP - scn=' || scn); deb('cleanupCKP - seq=' || seq); deb('cleanupCKP - keep_ckp_key_1=' || keep_ckp_key_1); deb('cleanupCKP - keep_ckp_key_2=' || keep_ckp_key_2); -- In this statement, the ckp2 query defines the rows that must not be -- deleted. All other rows can be deleted. The outer join of ckp1 and ckp2 -- 'inverts' the selection, leaving just the rows that can be deleted. delete from ckp where dbinc_key = this_dbinc_key and ckp_key in (select ckp_key1 from (select ckp_key ckp_key1 from ckp where dbinc_key = this_dbinc_key) ckp1, (select keep_ckp_key_1 ckp_key2 from dual union select keep_ckp_key_2 from dual union select nvl(max(ckp_key),0) from ckp where dbinc_key=this_dbinc_key union select start_ckp_key from tsatt where dbinc_key = this_dbinc_key union select nvl(end_ckp_key,0) from tsatt where dbinc_key = this_dbinc_key) ckp2 where ckp_key1 = ckp_key2(+) and ckp_key2 is null) and site_key = this_site_key; deb('cleanupCKP - deleted ' || sql%rowcount || ' rows from ckp table'); deb('cleanupCKP - took ' || ((sysdate - start_time) * 86400) || ' seconds'); END cleanupCKP; PROCEDURE cleanupRLH IS oldscn NUMBER; start_time DATE := sysdate; BEGIN -- Remove unneeded rows from the RLH table. We can remove RLH rows that -- have an scn older than the oldest scn of all the backups and copies. -- The RLH records are only used when computing the SCN of a time given -- to recover. If no backups/copies exist that have that SCN, the RLH -- records are not needed. 
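-- For example (illustrative SCNs): if the oldest backup or copy known for
-- this incarnation has SCN 501000, an rlh row whose log history lies
-- entirely below that SCN can never be needed to translate a recovery
-- time and is deleted below; rows with low_scn >= 501000 are kept.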
IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; SELECT nvl(min(scn),power(2,64)-1) INTO oldscn FROM ( SELECT min(brl.low_scn) scn FROM brl WHERE brl.dbinc_key = this_dbinc_key UNION SELECT min(al.low_scn) FROM al WHERE al.dbinc_key = this_dbinc_key UNION SELECT min(xal.low_scn) FROM xal WHERE xal.dbinc_key = this_dbinc_key UNION SELECT min(bdf.ckp_scn) FROM bdf WHERE bdf.dbinc_key = this_dbinc_key UNION SELECT min(cdf.ckp_scn) FROM cdf WHERE cdf.dbinc_key = this_dbinc_key UNION SELECT min(xdf.ckp_scn) FROM xdf WHERE xdf.dbinc_key = this_dbinc_key UNION SELECT min(bcf.ckp_scn) FROM bcf WHERE bcf.dbinc_key = this_dbinc_key UNION SELECT min(ccf.ckp_scn) FROM ccf WHERE ccf.dbinc_key = this_dbinc_key UNION SELECT min(xcf.ckp_scn) FROM xcf WHERE xcf.dbinc_key = this_dbinc_key ); deb('cleanupRLH - scn='||oldscn); DELETE FROM rlh WHERE rlh.dbinc_key = this_dbinc_key AND low_scn < oldscn; deb('cleanupRLH - deleted ' || sql%rowcount || ' rows from rlh table'); deb('cleanupRLH - took ' || ((sysdate - start_time) * 86400) || ' seconds'); END cleanupRLH; PROCEDURE cleanupBV IS start_time DATE := sysdate; BEGIN -- This procedure deletes all 'backup validate' backups that were -- created more than 180 days ago. This is the only -- place that backup validate rows are ever deleted. DELETE FROM bs WHERE db_key = this_db_key AND ((input_file_scan_only='YES' AND SYSDATE - completion_time >= 180) OR (nvl(input_file_scan_only,'NO')='NO' AND status='D')); deb('cleanupBV - deleted ' || sql%rowcount || ' rows from bs table'); deb('cleanupBV - took ' || ((sysdate - start_time) * 86400) || ' seconds'); END cleanupBV; FUNCTION getDbid RETURN NUMBER IS dbid NUMBER; BEGIN SELECT db.db_id INTO dbid FROM db WHERE db_key = this_db_key AND curr_dbinc_key = this_dbinc_key; RETURN dbid; EXCEPTION WHEN no_data_found THEN raise_application_error(-20001, 'Database not found'); END getDbid; FUNCTION beginIncarnationResync(return_Recid in boolean DEFAULT FALSE) RETURN NUMBER IS local_kccdivts number; BEGIN checkResync; IF return_Recid THEN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_ic_recid INTO last_ic_recid FROM node WHERE site_key = this_site_key; ELSE last_ic_recid := sessionWaterMarks.high_ic_recid; END IF; RETURN last_ic_recid; ELSE IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT last_kccdivts INTO local_kccdivts FROM node WHERE site_key = this_site_key; ELSE local_kccdivts := 0; END IF; IF (local_kccdivts IS NULL) THEN local_kccdivts := 0; END IF; RETURN local_kccdivts; END IF; END beginIncarnationResync; -- Return the correct incarnation for the supplied resetlogs data, taking -- the current 'this_db_key' into account. If no matching incarnation is -- found, then an artificial one is added. 
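-- For example (illustrative values): resyncing a record stamped with
-- resetlogs SCN 3000 at time t does
--
--   my_dbinc_key := checkIncarnation(reset_scn => 3000, reset_time => t);
--
-- and, if no dbinc row matches (3000, t) for this db_key, an artificial
-- incarnation (db_name 'UNKNOWN' unless one is supplied) is inserted and
-- its new dbinc_key is returned.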
FUNCTION checkIncarnation(reset_scn IN NUMBER, reset_time IN DATE, prior_reset_scn IN NUMBER DEFAULT NULL, prior_reset_time IN DATE DEFAULT NULL, db_name IN VARCHAR2 DEFAULT 'UNKNOWN') RETURN NUMBER IS local dbinc%rowtype; prior_dbinc_key number := NULL; BEGIN BEGIN SELECT dbinc_key, parent_dbinc_key, db_name INTO local.dbinc_key, local.parent_dbinc_key, local.db_name FROM dbinc WHERE dbinc.db_key = this_db_key AND dbinc.reset_scn = checkIncarnation.reset_scn AND dbinc.reset_time = checkIncarnation.reset_time; EXCEPTION WHEN no_data_found THEN local.dbinc_key := NULL; local.parent_dbinc_key := NULL; local.db_name := 'UNKNOWN'; END; IF (local.parent_dbinc_key IS NULL AND checkIncarnation.prior_reset_scn IS NOT NULL) THEN BEGIN SELECT dbinc_key INTO prior_dbinc_key FROM dbinc WHERE dbinc.db_key = this_db_key AND dbinc.reset_scn = checkIncarnation.prior_reset_scn AND dbinc.reset_time = checkIncarnation.prior_reset_time; EXCEPTION WHEN no_data_found THEN prior_dbinc_key := NULL; END; END IF; IF (local.dbinc_key IS NOT NULL) THEN -- parent_dbinc_key not filled up? IF (local.parent_dbinc_key IS NULL AND prior_dbinc_key IS NOT NULL) THEN UPDATE dbinc SET parent_dbinc_key = prior_dbinc_key WHERE dbinc.dbinc_key = local.dbinc_key; END IF; -- db_name not filled up? IF (local.db_name != 'UNKNOWN' AND checkIncarnation.db_name != 'UNKNOWN') THEN UPDATE dbinc SET db_name = checkIncarnation.db_name WHERE dbinc.dbinc_key = local.dbinc_key; END IF; RETURN local.dbinc_key; END IF; -- the database incarnation was not found, create an artificial one BEGIN INSERT INTO dbinc (dbinc_key, db_key, db_name, reset_scn, reset_time, parent_dbinc_key) VALUES (rman_seq.nextval, this_db_key, upper(checkIncarnation.db_name), checkIncarnation.reset_scn, checkIncarnation.reset_time, prior_dbinc_key); EXCEPTION WHEN dup_val_on_index THEN raise_application_error(-20009, 'Db incarnation already registered'); END; SELECT rman_seq.currval INTO local.dbinc_key FROM dual; RETURN local.dbinc_key; END checkIncarnation; PROCEDURE endIncarnationResync(high_kccdivts IN NUMBER, high_ic_recid IN NUMBER DEFAULT 0) IS BEGIN -- If incarnation records are resynced based on last_ic_recid, then they -- are treated as any other circular records. 
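-- For example (illustrative recids): if node.high_ic_recid was 42 after
-- the previous resync, beginIncarnationResync(TRUE) returned 42, the
-- client shipped only incarnation records with recid > 42, and the update
-- below advances the watermark for the next resync.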
IF (last_ic_recid IS NOT NULL) THEN IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN -- set the high_ic_recid for the next resync UPDATE node SET high_ic_recid = endIncarnationResync.high_ic_recid, last_kccdivts = endIncarnationResync.high_kccdivts WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_ic_recid := high_ic_recid; last_ic_recid := NULL; ELSE IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET last_kccdivts = high_kccdivts WHERE site_key = this_site_key; END IF; END IF; -- recompute chain of incarnation recomputeDbincStatus(this_db_key, this_dbinc_key); END endIncarnationResync; /*-----------------------------* * Normal restore point Resync * *-----------------------------*/ FUNCTION beginRestorePointResync RETURN NUMBER IS BEGIN checkResync; IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN SELECT high_nrsp_recid INTO last_nrsp_recid FROM node WHERE site_key = this_site_key; ELSE last_nrsp_recid := sessionWaterMarks.high_nrsp_recid; END IF; RETURN last_nrsp_recid; END beginRestorePointResync; PROCEDURE checkRestorePoint( nrsp_recid IN NUMBER ,nrsp_stamp IN NUMBER ,nrsp_name IN VARCHAR2 ,reset_scn IN NUMBER ,reset_time IN DATE ,to_scn IN NUMBER ,nrsp_time IN DATE ,create_time IN DATE ,deleted IN NUMBER ) IS my_dbinc_key NUMBER; inscheck NUMBER; BEGIN IF (last_nrsp_recid IS NULL) THEN raise_application_error(-20037, 'Invalid last recid'); END IF; IF (nrsp_recid < last_nrsp_recid) THEN raise_application_error(-20036, 'Invalid record order'); END IF; IF (nrsp_recid > last_nrsp_recid + 1) THEN -- there is gap in deleted object records -- not sure what we should do here NULL; END IF; last_nrsp_recid := nrsp_recid; IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; IF (nrsp_stamp < kccdivts) THEN deb('checkRestorePoint - ignoring record kccdivts='||kccdivts); RETURN; -- obsolete record from a backup controlfile END IF; -- find the dbinc_key my_dbinc_key := checkIncarnation(reset_scn, reset_time); -- delete or insert records, there is no update IF (deleted = 1) THEN DELETE nrsp WHERE checkRestorePoint.nrsp_recid = nrsp_recid AND checkRestorePoint.nrsp_stamp = nrsp_stamp AND my_dbinc_key = nrsp.dbinc_key AND this_site_key = site_key; ELSE INSERT INTO nrsp (nrsp_recid ,nrsp_stamp ,rspname ,dbinc_key ,site_key ,to_scn ,rsptime ,creation_time ,long_term) VALUES (checkRestorePoint.nrsp_recid ,checkRestorePoint.nrsp_stamp ,checkRestorePoint.nrsp_name ,my_dbinc_key ,this_site_key ,checkRestorePoint.to_scn ,checkRestorePoint.nrsp_time ,checkRestorePoint.create_time ,NULL); -- UNKNOWN: cleanupNRS will reset to YES/NO END IF; EXCEPTION WHEN dup_val_on_index THEN deb('checkRestorePoint - Inside dup_val_on_index exception for' || ' recid ' || checkRestorePoint.nrsp_recid || ' stamp ' || checkRestorePoint.nrsp_stamp); SELECT min(nrsp.nrsp_recid) INTO inscheck FROM nrsp WHERE nrsp.nrsp_recid = checkRestorePoint.nrsp_recid AND nrsp.nrsp_stamp = checkRestorePoint.nrsp_stamp AND nrsp.dbinc_key = my_dbinc_key AND nrsp.site_key = this_site_key AND nrsp.rspname = checkRestorePoint.nrsp_name AND nrsp.to_scn = checkRestorePoint.to_scn; IF inscheck IS NULL THEN -- Some internal error to indicate no match raise_application_error(-20037, 'Invalid last recid'); END IF; RETURN; WHEN no_data_found THEN RETURN; WHEN others THEN RAISE; END checkRestorePoint; PROCEDURE 
endRestorePointResync(lowrecid IN number) IS lowscn number; BEGIN -- Save the low recid, so we can purge restore points in cleanup -- IF (lowrecid = 0) THEN low_nrsp_recid := NULL; ELSE low_nrsp_recid := lowrecid; END IF; -- Fix up last recid -- IF (this_cf_type = 'CURRENT' OR (this_cf_type = 'STANDBY' AND this_db_unique_name is not null)) THEN UPDATE node SET high_nrsp_recid = last_nrsp_recid WHERE site_key = this_site_key; END IF; sessionWaterMarks.high_nrsp_recid := last_nrsp_recid; last_nrsp_recid := NULL; END endRestorePointResync; PROCEDURE listScriptNames(glob IN number, allnames IN number) IS lglob number := NULL; lalln number := NULL; BEGIN deb('listScriptNames - List script names called with glob: '|| nvl(to_char(glob), 'NULL')||' and allnames: '|| nvl(to_char(allnames), 'NULL')); IF glob = 1 then lglob := 1; END IF; IF allnames = 1 then lalln := 1; END IF; IF lscrnames_c%ISOPEN THEN deb('listScriptNames - Closing lscrnames_c cursor'); CLOSE lscrnames_c; END IF; deb('listScriptNames - Opening lscrnames_c cursor'); OPEN lscrnames_c(lglob, lalln); END listScriptNames; PROCEDURE getScriptNames(dbname OUT varchar2, scnm OUT varchar2, sccom OUT varchar2) IS ldum number := NULL; BEGIN IF NOT lscrnames_c%ISOPEN THEN raise_application_error(-20403, 'listScriptNames not done'); END IF; deb('getScriptNames - Fetching lscrnames_c cursor'); FETCH lscrnames_c INTO ldum, dbname, scnm, sccom; IF lscrnames_c%NOTFOUND THEN deb('getScriptNames - Closing lscrnames_c cursor'); CLOSE lscrnames_c; raise no_data_found; END IF; END getScriptNames; -- The procedure cleanupRSR deletes all RC_RMAN_STATUS rows -- older than 60 days. PROCEDURE cleanupRSR IS nowTime date; BEGIN SELECT SYSDATE INTO nowTime from dual; IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; -- Delete rsr rows that ended more than 60 days ago, for any -- incarnation of this database. DELETE FROM rsr WHERE rsr_end < nowTime-60 AND rsr.dbinc_key IN (select dbinc_key from dbinc where dbinc.db_key = this_db_key); deb('cleanupRSR - deleted ' || sql%rowcount || ' rows from rsr table'); deb('cleanupRSR - took ' || ((sysdate - nowTime) * 86400) || ' seconds'); END cleanupRSR; -- The procedure cleanupROUT deletes all RC_RMAN_OUTPUT rows corresponding -- to jobs older than 7 days. PROCEDURE cleanupROUT IS start_time date; high_stamp number; high_session_key number; BEGIN IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; start_time := SYSDATE; high_stamp := date2stamp(start_time-7); SELECT max(rsr_key) into high_session_key FROM rsr, dbinc WHERE dbinc.db_key = this_db_key AND rsr.dbinc_key = dbinc.dbinc_key AND rsr.rsr_stamp < high_stamp; deb('cleanupROUT select took ' || ((sysdate - start_time) * 86400) || ' seconds'); -- Delete rows from rout table for jobs older than 7 days.
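-- For example (illustrative keys): if the newest rsr row older than
-- 7 days has rsr_key 9000, then high_session_key = 9000 and every rout
-- row with rout_skey <= 9000 belongs to a job at least 7 days old, so the
-- delete below removes exactly those rows.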
If high_session_key IS NOT NULL THEN DELETE FROM rout WHERE db_key = this_db_key AND rout_skey <= high_session_key; deb('cleanupROUT deleted ' || sql%rowcount || ' rows from rout table'); END IF; deb('cleanupROUT took ' || ((sysdate - start_time) * 86400) || ' seconds'); END cleanupROUT; -- Clean up Normal Restore Points -- PROCEDURE cleanupNRS IS start_time date; BEGIN deb('cleanupNRS - low_nrsp_recid is ' || NVL(TO_CHAR(low_nrsp_recid), 'NULL')); start_time := SYSDATE; -- Restore points maintain a unique relationship with backups based on -- SCN values. Anytime a backup changes in a way that may affect the -- keep attribute, any restore points near or at that SCN value may be -- affected. The LONG_TERM column indicates when a restore point may be -- used to restore a keep backup. So when a backup is -- added/dropped/changed and has keep attributes, I call -- updateRestorePoint to set the LONG_TERM column to NULL (unknown). -- This routine, which runs after all the backups have been resync'd into -- the catalog, will run a complex query to set the LONG_TERM column -- correctly. -- If not for the RC_RESTORE_POINT view, LONG_TERM would only be important -- for rows below low_nrsp_recid UPDATE nrsp SET LONG_TERM = 'YES' WHERE long_term IS NULL AND this_site_key = site_key AND nrsp_recid in (SELECT nrsp.nrsp_recid FROM bs, brl, nrsp WHERE bs.bs_key = brl.bs_key AND bs.keep_options > 0 AND brl.low_scn <= nrsp.to_scn AND brl.next_scn > nrsp.to_scn AND this_site_key = bs.site_key AND this_site_key = nrsp.site_key UNION SELECT nrsp.nrsp_recid FROM xal, nrsp WHERE xal.keep_options > 0 AND xal.low_scn <= nrsp.to_scn AND xal.next_scn > nrsp.to_scn AND this_site_key = xal.site_key AND this_site_key = nrsp.site_key UNION SELECT nrsp_recid FROM bs, bdf, nrsp WHERE bs.bs_key = bdf.bs_key AND bs.keep_options > 0 AND bdf.ckp_scn = nrsp.to_scn+1 AND this_site_key = bs.site_key AND this_site_key = nrsp.site_key UNION SELECT nrsp_recid FROM xdf, nrsp WHERE xdf.keep_options > 0 AND xdf.ckp_scn = nrsp.to_scn+1 AND this_site_key = xdf.site_key AND this_site_key = nrsp.site_key); deb('cleanupNRS - updated ' || sql%rowcount || ' rows to LONG_TERM = YES'); -- Any unknown rows left are not long term. 
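-- For example (illustrative SCNs): a KEEP backup of an archived log
-- covering SCNs [1000, 2000) marks a restore point with to_scn 1500 as
-- LONG_TERM = 'YES' above; a restore point at SCN 2500 with no covering
-- keep backup stays NULL and is set to 'NO' below.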
UPDATE nrsp SET LONG_TERM = 'NO' WHERE long_term IS NULL AND this_site_key = site_key; deb('cleanupNRS - updated ' || sql%rowcount || ' rows to LONG_TERM = NO'); -- Remove all restore points not in the catalog -- and not supporting a backup (long_term) DELETE nrsp WHERE nrsp_recid < low_nrsp_recid AND long_term = 'NO' AND site_key = this_site_key; low_nrsp_recid := NULL; deb('cleanupNRS - deleted ' || sql%rowcount || ' rows from nrsp table'); deb('cleanupNRS - took ' || ((sysdate - start_time) * 86400) || ' seconds'); END; PROCEDURE updateOldestFlashbackSCN ( oldest_flashback_scn IN NUMBER -- obsolete column ,oldest_flashback_time IN DATE DEFAULT NULL ) IS tmp NUMBER; BEGIN deb('updateOldestFlashbackSCN - guaranteed_flashback_scn=' || nvl(to_char(oldest_flashback_scn), 'NULL') || ' flashback_time=' || nvl(to_char(oldest_flashback_time), 'NULL')); -- no guaranteed restore point and no flashback time IF (oldest_flashback_scn IS NULL AND oldest_flashback_time IS NULL) THEN DELETE FROM fb WHERE db_unique_name = this_db_unique_name AND dbinc_key = this_dbinc_key; RETURN; END IF; BEGIN SELECT 0 INTO tmp FROM fb WHERE db_unique_name = this_db_unique_name AND dbinc_key = this_dbinc_key; EXCEPTION WHEN no_data_found THEN INSERT INTO fb (dbinc_key, db_unique_name, oldest_flashback_scn, oldest_flashback_time) VALUES (this_dbinc_key, this_db_unique_name, oldest_flashback_scn, oldest_flashback_time); RETURN; WHEN others THEN RAISE; END; UPDATE fb SET oldest_flashback_scn = updateOldestFlashbackSCN.oldest_flashback_scn, oldest_flashback_time = updateOldestFlashbackSCN.oldest_flashback_time WHERE db_unique_name = this_db_unique_name AND dbinc_key = this_dbinc_key; END updateOldestFlashbackSCN; FUNCTION getDbinc RETURN NUMBER IS BEGIN IF (this_dbinc_key IS NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; RETURN this_dbinc_key; END getDbinc; -- This function will be used by old version of RMAN client. It should have -- actually been in PRVTRMNU.SQL, but for some reason was added here... So -- lets leave this alone FUNCTION isDuplicateRecord(recid IN NUMBER ,stamp IN NUMBER ,type IN VARCHAR2) RETURN BOOLEAN IS rec_count NUMBER; BEGIN checkResync; IF (type = 'AL') THEN SELECT count(*) INTO rec_count FROM al, dbinc WHERE dbinc.db_key = this_db_key AND al.dbinc_key = dbinc.dbinc_key AND isDuplicateRecord.recid = al.al_recid AND isDuplicateRecord.stamp = al.al_stamp AND al.site_key = this_site_key; ELSIF (type = 'BP') THEN SELECT count(*) INTO rec_count FROM bp WHERE bp.db_key = this_db_key AND isDuplicateRecord.recid = bp.bp_recid AND isDuplicateRecord.stamp = bp.bp_stamp AND bp.site_key = this_site_key; ELSIF (type = 'DC') THEN SELECT count(*) INTO rec_count FROM cdf, dbinc WHERE dbinc.db_key = this_db_key AND cdf.dbinc_key = dbinc.dbinc_key AND isDuplicateRecord.recid = cdf.cdf_recid AND isDuplicateRecord.stamp = cdf.cdf_stamp AND cdf.site_key = this_site_key; IF (rec_count = 0) THEN SELECT count(*) INTO rec_count FROM ccf, dbinc WHERE dbinc.db_key = this_db_key AND ccf.dbinc_key = dbinc.dbinc_key AND isDuplicateRecord.recid = ccf.ccf_recid AND isDuplicateRecord.stamp = ccf.ccf_stamp AND ccf.site_key = this_site_key; END IF; ELSE raise_application_error(-20999, 'Internal error in isDuplicateRecord(): bad type '|| type); END IF; IF rec_count > 0 THEN RETURN TRUE; ELSE RETURN FALSE; END IF; END isDuplicateRecord; -- There are following cases when this function will return TRUE: -- 1) open resetlogs which resets watermark. -- 2) resync from controlfilecopy. 
-- 3) resync from backup controlfile (like BCV split mirror backup) -- 4) as a workaround for slow resync bug (as one reported in bug# 5899994) -- FUNCTION doDuplicateMining RETURN BOOLEAN IS last_recid number; BEGIN checkResync; IF (this_cf_type != 'CURRENT' and this_cf_type != 'STANDBY') THEN RETURN TRUE; END IF; -- For pre-10g databases, the node table doesn't maintain watermarks -- for standby as db_unique_name wasn't present. IF (this_cf_type = 'STANDBY' and this_db_unique_name is NULL) THEN RETURN TRUE; END IF; -- just check one of the water marks to find out if water marks are -- maintained. If no water marks are maintained, then we need to do -- duplicate mining to find the last resync timestamp SELECT high_rlh_recid INTO last_recid FROM node WHERE site_key = this_site_key; IF (last_recid = 0) THEN deb('doDuplicateMining returns TRUE'); RETURN TRUE; ELSE RETURN FALSE; END IF; END doDuplicateMining; PROCEDURE unregisterSite(db_unique_name IN VARCHAR2, incbcks IN BINARY_INTEGER ) IS lsite_key number; new_ckp_site_key number; db_role node.database_role%TYPE; BEGIN deb('unregisterSite - remove meta-data for node '|| db_unique_name); IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; -- disallow unregistering connected target database IF this_db_unique_name = upper(db_unique_name) THEN raise_application_error(-20244, db_unique_name || ' can not unregister connected target database'); END IF; -- check if site is known to the recovery catalog. BEGIN select site_key, database_role into lsite_key, db_role from node where node.db_unique_name = upper(unregisterSite.db_unique_name) and node.db_key = this_db_key; EXCEPTION WHEN no_data_found THEN raise_application_error( -20243, upper(unregisterSite.db_unique_name) || ' db_unique_name unknown to recovery catalog:'); END; -- If a full resync was ever done from this site and some tablespace -- attribute changes were resynced, we need to retain those CKP rows. -- So, update the site_key value for such rows to either the primary or -- another standby site.
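-- For example (illustrative): when unregistering standby BOSTON, CKP rows
-- created by BOSTON's full resyncs that are still referenced by
-- tsatt.start_ckp_key/end_ckp_key are re-owned below by another site of
-- this database, so the tablespace attribute history survives the
-- unregister.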
select site_key into new_ckp_site_key from node where db_key=this_db_key and site_key <> lsite_key and rownum = 1 order by database_role; IF new_ckp_site_key is not null THEN update ckp set site_key = new_ckp_site_key where site_key = lsite_key and ckp_type = 'FULL' and ckp_key in (select start_ckp_key from tsatt where dbinc_key in (select dbinc_key from dbinc where db_key=this_db_key) union select end_ckp_key from tsatt where dbinc_key in (select dbinc_key from dbinc where db_key=this_db_key)); deb('updated ' || sql%rowcount || ' rows in ckp, site_key to ' || new_ckp_site_key); END IF; -- remove the site ownership for backups owned by the site being unregistered IF incbcks <> 0 THEN delete bs WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from bs table'); delete bp WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from bp table'); delete ccf WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from ccf table'); delete xcf WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from xcf table'); delete cdf WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from cdf table'); delete xdf WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from xdf table'); delete xal WHERE site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from xal table'); ELSE update bs set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from bs table'); update bp set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from bp table'); update ccf set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from ccf table'); update xcf set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from xcf table'); update cdf set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from cdf table'); update xdf set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from xdf table'); update xal set site_key = NULL WHERE site_key = lsite_key; deb('updated ' || sql%rowcount || ' rows from xal table'); END IF; -- deleting the node row deletes entries from site_dfatt, al, site_tfatt, orl etc delete node where site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from node table'); delete fb where db_unique_name = unregisterSite.db_unique_name and dbinc_key in (select dbinc_key from dbinc where db_key = this_db_key); deb('deleted ' || sql%rowcount || ' rows from fb table'); -- remove the connect identifier configuration for the site -- The code below assumes that the first character of "value" is a quote -- Use the first quote character to find the second quote character. delete conf where name = 'DB_UNIQUE_NAME' and db_key = this_db_key and upper(unregisterSite.db_unique_name) = upper(substr(value, 2, instr(substr(value, 2, 32), substr(value, 1,1))-1)) and db_unique_name is null; deb('deleted ' || sql%rowcount || ' rows from conf table(2)'); -- remove the connect identifier configuration from all control files too, -- on the next resync.
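-- For example (illustrative value): a generic conf row with name
-- 'DB_UNIQUE_NAME' and a value of the form
--   'BOSTON' CONNECT IDENTIFIER 'boston_tns'
-- matched the delete above because its first quoted token is the
-- db_unique_name; the update below then makes every remaining site
-- refresh the configuration in its control file on the next resync.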
if sql%rowcount <> 0 then update node set force_resync2cf = 'YES' where db_key = this_db_key; end if; -- remove all site specific configuration delete conf where site_key = lsite_key; deb('deleted ' || sql%rowcount || ' rows from conf table (site rows)'); commit; END unregisterSite; ------------------------------- renameSite ------------------------------------ PROCEDURE renameSite(from_db_unique_name IN VARCHAR2, to_db_unique_name IN VARCHAR2) IS rec_count NUMBER; BEGIN deb('renameSite - rename meta-data from '|| from_db_unique_name || ' to ' || to_db_unique_name); IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'Database not set'); END IF; -- disallow renaming connected target database IF this_db_unique_name = upper(from_db_unique_name) THEN raise_application_error(-20244, from_db_unique_name || ' can not rename connected target database'); END IF; -- check if from_db_unique_name is known to the recovery catalog. SELECT count(*) INTO rec_count FROM node WHERE node.db_unique_name = upper(from_db_unique_name) AND node.db_key = this_db_key; IF rec_count = 0 THEN raise_application_error(-20243, from_db_unique_name || ' site unknown to recovery catalog:'); END IF; -- ensure to_db_unique_name is not known to the recovery catalog. SELECT count(*) INTO rec_count FROM node WHERE node.db_unique_name = upper(to_db_unique_name) AND node.db_key = this_db_key; IF rec_count = 1 THEN raise_application_error(-20246, to_db_unique_name || ' site known to recovery catalog:'); END IF; UPDATE NODE SET db_unique_name = upper(to_db_unique_name) WHERE db_unique_name = upper(from_db_unique_name) AND db_key = this_db_key; deb('renamed db_unique_name ' || sql%rowcount || ' row updated'); UPDATE CONF SET db_unique_name = upper(to_db_unique_name) WHERE db_unique_name = upper(from_db_unique_name) AND db_key = this_db_key; deb('updated ' || sql%rowcount || ' rows in conf table'); -- TODO, remove db_unique_name from flashback table UPDATE FB SET db_unique_name = upper(to_db_unique_name) WHERE db_unique_name = upper(from_db_unique_name) AND dbinc_key IN (select dbinc_key from dbinc where db_key = this_db_key); deb('updated ' || sql%rowcount || ' rows in fb table'); COMMIT; END renameSite; -- The following procedure stores a new entry in the node table -- if it doesn't already exist PROCEDURE resyncAddDBUname(cdbunstr IN varchar2) IS dbuname varchar2(30); numentries number; BEGIN -- The cdbunstr contains a string of the form: -- 'db_unique_name' CONNECT IDENTIFIER 'connect_id'. -- The connect_id may be in either single or double quotes. -- The db_unique_name may also be in either single or double quotes. -- We have to extract the unquoted db_unique_name and look it up -- in the node table for our database. -- If it doesn't exist, then we have to add an entry for this -- db_unique_name to the node table - note that not all columns are -- populated. We assume that this is a STANDBY node, since generic -- configuration changes can only be made at the primary. -- deb('resyncAddDBUname - cdbunstr = '|| cdbunstr); dbuname := substr(cdbunstr, 2, 30); -- strip out the first quote -- strip out the second quote. Assumes that the FIRST character -- in cdbunstr is a quote character. deb('resyncAddDBUname - dbuname before = '|| dbuname); dbuname := substr(dbuname, 1, instr(dbuname, substr(cdbunstr,1,1))-1); deb('resyncAddDBUname - db_unique_name = '|| dbuname); -- Add a new entry as a STANDBY site. If the entry already exists, -- we will get a constraint violation exception which we will ignore.
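-- For example (illustrative input): given
--   cdbunstr => 'BOSTON' CONNECT IDENTIFIER 'boston_tns'
-- the two substr/instr steps above strip the leading quote and cut at the
-- matching one, leaving dbuname = BOSTON, which the insert below
-- registers as a STANDBY node (the duplicate-key exception is ignored if
-- the entry already exists).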
insert into node (db_unique_name, db_key, force_resync2cf, database_role, site_key) values(upper(dbuname), this_db_key, 'YES', 'STANDBY', rman_seq.nextval); deb('resyncAddDBUname - Added entry to node table with value ' || dbuname); EXCEPTION WHEN dup_val_on_index THEN -- we already have an entry. nothing to do RETURN; END resyncAddDBUname; ------------------------------- getThisSiteKey -------------------------------- FUNCTION getThisSiteKey(db_unique_name in VARCHAR2 DEFAULT NULL) return NUMBER IS ret_site_key number; BEGIN deb('getThisSiteKey - This site key is '||this_site_key); if db_unique_name is not null then ret_site_key := dbms_rcvman.getSiteKey(db_unique_name); else ret_site_key := this_site_key; end if; deb('Returning site key is '||ret_site_key); return ret_site_key; END getThisSiteKey; PROCEDURE enableResyncActions IS BEGIN deb('enableResyncActions - resync action tracing enabled'); doResyncReasons := TRUE; END enableResyncActions; PROCEDURE setReason(reason IN number, forceSet IN boolean default FALSE) IS BEGIN IF doResyncReasons THEN -- We only set the reason if the current reason is NONE or we are asked -- to forcefully set it IF resync_reason = RESYNC_REASON_NONE OR forceSet THEN resync_reason := reason; deb('setReason - resync_reason: '||to_char(resync_reason)); END IF; ELSE resync_reason := RESYNC_REASON_NOACTION; END IF; END setReason; FUNCTION getReason RETURN number IS BEGIN IF doResyncReasons THEN deb('getReason - resync_reason: '||to_char(resync_reason)); RETURN resync_reason; ELSE RETURN RESYNC_REASON_NOACTION; END IF; END getReason; PROCEDURE incResyncActions(action IN number, objno IN number, objname IN varchar2) IS BEGIN IF not doResyncReasons THEN deb('incResyncActions - Not debugging'); RETURN; END IF; BEGIN deb('incResyncActions - for action: '||to_char(action)||' objno '|| nvl(to_char(objno), 'IS NULL')||' objname '||nvl(objname, 'IS NULL'), RCVCAT_LEVEL_HI); IF debOK(RCVCAT_LEVEL_HI) THEN dumpResyncActions; END IF; IF not fullResyncAction.active THEN RETURN; END IF; IF objno is NOT NULL THEN IF fullResyncAction.lastobjno = objno THEN IF fullResyncAction.actTaken(action) THEN -- we have already done this object, ignore deb('incResyncActions - '|| RESYNC_ACTION_OBJECTS(fullResyncAction.objtype)||' '|| to_char(objno)||' already '|| RESYNC_ACTION_NAMES(action), RCVCAT_LEVEL_HI); RETURN; ELSE fullResyncAction.actTaken(action) := TRUE; END IF; ELSE -- new objno fullResyncAction.lastobjno := objno; fullResyncAction.actTaken := resyncActionTaken_t(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE); fullResyncAction.actTaken(action) := TRUE; END IF; END IF; fullResyncAction.actCount(action) := fullResyncAction.actCount(action) + 1; fullResyncAction.valid := TRUE; IF objno is NOT NULL THEN IF objname is NOT NULL THEN deb('incResyncActions - '|| RESYNC_ACTION_OBJECTS(fullResyncAction.objtype)||' '|| objname||'('||to_char(objno)||') '|| RESYNC_ACTION_NAMES(action), RCVCAT_LEVEL_HI); ELSE deb('incResyncActions - '|| RESYNC_ACTION_OBJECTS(fullResyncAction.objtype)||' '|| to_char(objno)||' '|| RESYNC_ACTION_NAMES(action), RCVCAT_LEVEL_HI); END IF; ELSE deb('incResyncActions - '|| RESYNC_ACTION_OBJECTS(fullResyncAction.objtype)||' '|| to_char(objname)||' '|| RESYNC_ACTION_NAMES(action), RCVCAT_LEVEL_HI); END IF; deb('incResyncActions - Exiting', RCVCAT_LEVEL_HI); EXCEPTION WHEN others THEN deb('incResyncActions - caught exception '|| substr(sqlerrm, 1, 132) || ' for '|| to_char(action) || ' objno ' || nvl(to_char(objno), 'IS NULL') || ' objname ' || nvl(objname, 'IS NULL')); END; END
incResyncActions; PROCEDURE dumpResyncActions IS i number; BEGIN IF not doResyncReasons OR not debOK(RCVCAT_LEVEL_HI) THEN RETURN; END IF; deb('dumpResyncActions - resync_reason: '||to_char(nvl(resync_reason, -1))); IF resync_reason = RESYNC_REASON_NOACTION THEN RETURN; END IF; IF fullResyncAction.active THEN deb('dumpResyncActions - Container is active'); ELSE deb('dumpResyncActions - Container is NOT active'); END IF; IF fullResyncAction.valid THEN deb('dumpResyncActions - Container is valid'); ELSE deb('dumpResyncActions - Container is NOT valid'); END IF; IF fullResyncAction.objtype IS NOT NULL THEN deb('dumpResyncActions - objtype: '|| RESYNC_ACTION_OBJECTS(fullResyncAction.objtype)); ELSE deb('dumpResyncActions - objtype is NULL'); END IF; IF fullResyncAction.lastobjno IS NOT NULL THEN deb('dumpResyncActions - lastobjno: '|| to_char(fullResyncAction.lastobjno)); ELSE deb('dumpResyncActions - lastobjno is NULL'); END IF; FOR i IN 1..6 LOOP IF fullResyncAction.actTaken(i) THEN deb('dumpResyncActions - '||RESYNC_ACTION_NAMES(i)||' TRUE - '|| fullResyncAction.actCount(i)); ELSE deb('dumpResyncActions - '||RESYNC_ACTION_NAMES(i)||' FALSE - '|| fullResyncAction.actCount(i)); END IF; END LOOP; END dumpResyncActions; PROCEDURE getResyncActions(valid OUT boolean ,added OUT number ,dropped OUT number ,changed OUT number ,recreated OUT number ,renamed OUT number ,resized OUT number) IS BEGIN IF doResyncReasons THEN IF debOK(RCVCAT_LEVEL_HI) THEN deb('getResyncActions - called', RCVCAT_LEVEL_HI); dumpResyncActions; END IF; fullResyncAction.active := FALSE; valid := fullResyncAction.valid; fullResyncAction.valid := FALSE; added := fullResyncAction.actCount(RESYNC_ACTION_ADD); dropped := fullResyncAction.actCount(RESYNC_ACTION_DROP); changed := fullResyncAction.actCount(RESYNC_ACTION_CHANGE); recreated := fullResyncAction.actCount(RESYNC_ACTION_RECREATE); renamed := fullResyncAction.actCount(RESYNC_ACTION_RENAME); resized := fullResyncAction.actCount(RESYNC_ACTION_RESIZE); setReason(RESYNC_REASON_NONE, TRUE); ELSE setReason(RESYNC_REASON_NOACTION, TRUE); END IF; END getResyncActions; PROCEDURE clearResyncActions IS BEGIN fullResyncAction.active := FALSE; fullResyncAction.valid := FALSE; fullResyncAction.lastobjno := -1; fullResyncAction.objtype := NULL; fullResyncAction.actTaken := resyncActionTaken_t(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE); fullResyncAction.actCount := resyncActionCounts_t(0, 0, 0, 0, 0, 0); dumpResyncActions; END clearResyncActions; /*-------------------------------------------------* * Private functions for import catalog processing * *-------------------------------------------------*/ -- -- adjustRmanSeq - adjust rman sequence -- -- It reserves the rman_seq value for importing the source recovery -- catalog database into this schema. -- PROCEDURE adjustRmanSeq IS currval number; newval number; incseq number; BEGIN LOOP SELECT rman_seq.nextval INTO currval FROM dual; EXECUTE IMMEDIATE 'SELECT rman_seq.nextval@' || import_dblink || ' FROM dual' INTO incseq; EXECUTE IMMEDIATE 'ALTER SEQUENCE rman_seq INCREMENT BY ' || incseq; SELECT (rman_seq.nextval - incseq) INTO import_offset FROM dual; EXECUTE IMMEDIATE 'ALTER SEQUENCE rman_seq INCREMENT BY 1'; -- If i don't see the import_offset >= currval, then some other process -- concurrently executed this function and changed rman_seq -- increment counter before i did select into import_offset. So, retry -- again. 
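-- For example (illustrative numbers): if the local rman_seq.nextval is
-- 1001 and the source catalog's nextval is 251, one increment-by-251
-- nextval jumps the local sequence to 1252 and leaves import_offset =
-- 1001; imported keys (source key + 1001) then fall inside the reserved
-- range and cannot collide with locally generated keys.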
EXIT WHEN (import_offset >= currval); END LOOP; END adjustRmanSeq; -- -- isColumnASeq - is the column value generated by the rman sequence -- -- Basically checks if the column name ends with KEY and is one of the listed -- key_columns. If so, return TRUE. Otherwise, FALSE. Used to offset -- the key column values when importing the catalog schema. -- FUNCTION isColumnASeq( column_name IN varchar2) RETURN BOOLEAN IS BEGIN IF (column_name like '%KEY') THEN FOR i in 1..key_columns.count LOOP IF (key_columns(i) = column_name) THEN RETURN TRUE; END IF; END LOOP; -- You are here because you defined a column that ends with -- KEY but forgot to add it to the key_columns table. If your -- column value isn't generated by rman_seq, then you need -- to rename the column name so that it doesn't end with KEY. raise_application_error(-20999, 'Internal error in ' || 'isColumnASeq(): bad column '|| column_name); END IF; RETURN FALSE; END isColumnASeq; -- -- getColumnName - get column names -- -- Given a table name and offset, this function returns the column names -- separated by commas and ordered by name. If an offset is provided, the -- offset is appended to the sequence-generated key columns. -- FUNCTION getColumnName( table_name IN varchar2 ,offset IN number DEFAULT NULL) RETURN VARCHAR2 IS v_table user_objects.object_name%TYPE; v_column varchar2(1024) := to_char(null); isaseq boolean; CURSOR column_c(tname varchar2) IS SELECT column_name, data_type FROM user_tab_columns WHERE table_name = tname ORDER BY column_name; FUNCTION add_comma(v_column IN varchar2) RETURN varchar2 IS BEGIN IF (v_column is null) THEN RETURN null; ELSE RETURN ','; END IF; END add_comma; FUNCTION add_offset( offset IN number ,data_type IN varchar2 ,column_name IN varchar2) RETURN varchar2 IS BEGIN IF (offset is null) THEN RETURN null; END IF; IF (data_type = 'NUMBER' AND isColumnASeq(column_name)) THEN RETURN '+' || offset; END IF; RETURN null; END add_offset; BEGIN SELECT object_name INTO v_table FROM user_objects WHERE object_name = upper(table_name) AND object_type = 'TABLE'; -- form the column names in ascending order FOR cRec in column_c(v_table) LOOP v_column := v_column || add_comma(v_column) || table_name || '.' || cRec.column_name || add_offset(offset, cRec.data_type, cRec.column_name); END LOOP; RETURN v_column; END getColumnName; -- -- importTable - import the content of a table into the recovery catalog -- database. -- -- Given a table name and offset, this function extracts the rows from -- the source recovery catalog database and inserts the rows into the -- recovery catalog database, incrementing the value of key columns -- by the offset provided. -- -- from_table and where_clause are used to join table_name in order to -- filter the rows of interest. Usually, from_table contains the idb or -- idbinc table name and the where_clause should contain how to -- join those tables with from_table. -- -- Example -- If there is a table named foo with columns (db_key, myname), then -- you have to import it by joining idb using db_key with idb.db_key. -- So, the importTable usage will look like.
--
--    from_table.delete;
--    from_table(1) := idb;
--    importTable(table_name   => 'foo'
--               ,from_table   => from_table
--               ,uniq_rows    => FALSE
--               ,where_clause => 'where foo.db_key = ' || idb || '.db_key'
--               );
--
PROCEDURE importTable(
   table_name   IN varchar2
  ,from_table   IN ts_name_list
  ,uniq_rows    IN boolean
  ,where_clause IN varchar2) IS
   insert_columns varchar2(2048);
   from_columns   varchar2(2048);
   source_table   varchar2(2048);
   uniq_keyword   varchar2(8);
   start_time     date;
BEGIN
   deb('Entering importTable table=' || table_name);
   start_time := sysdate;
   insert_columns := getColumnName(table_name);
   from_columns   := getColumnName(table_name, import_offset);
   source_table   := table_name || '@' || import_dblink;
   FOR i in 1..from_table.count LOOP
      source_table := source_table || ', ' || from_table(i) || '@' ||
                      import_dblink;
   END LOOP;
   IF (uniq_rows) THEN
      uniq_keyword := 'DISTINCT';
   ELSE
      uniq_keyword := '';
   END IF;
   --
   -- Basically doing:
   --
   -- INSERT INTO table_name(column1, column2, column3)
   --   (SELECT DISTINCT column1+offset, column2+offset, column3+offset
   --      FROM table_name@dblink, from_table@dblink
   --      where_clause);
   --
   EXECUTE IMMEDIATE 'INSERT INTO ' || table_name ||
      ' (' || insert_columns || ')' ||
      ' (SELECT ' || uniq_keyword || ' ' || from_columns ||
      ' FROM ' || source_table || ' ' || where_clause || ')';
   deb('imported rows = ' || sql%rowcount);
   deb('importTable took ' || ((sysdate - start_time) * 86400) ||
       ' seconds');
   deb('Finished importTable table=' || table_name);
END importTable;

--
-- registerImportDb - register all the databases that have to be
-- imported into the recovery catalog database.
--
-- Create db, dbinc entries for the databases that have to be imported.
-- This can't be done using importTable because db and dbinc are
-- dependent on each other (see constraint db_f1).
--
PROCEDURE registerImportDb(
   idb    IN varchar2
  ,idbinc IN varchar2) IS
   TYPE cur_typ IS ref CURSOR;
   update_c     cur_typ;
   from_table   ts_name_list;
   from_columns varchar2(2048);
   currkeys     numTab_t;
   dbids        numTab_t;
   dbkeys       numTab_t;
BEGIN
   from_columns := getColumnName('db', import_offset);
   deb('Entering registerImportDb');
   --
   -- Basically extracting db_key and db_id from the source database.
   -- Note that only db_key gets the offset; db_id is the real database
   -- id and is imported unchanged.
   --
   -- INSERT INTO db(db.db_key, db.db_id)
   --   (SELECT db.db_key+offset, db.db_id
   --      FROM db@dblink, idb@dblink
   --     WHERE db.db_key = idb.db_key);
   --
   EXECUTE IMMEDIATE 'INSERT INTO db (db.db_key, db.db_id)' ||
      ' (SELECT db.db_key + ' || import_offset ||' , db.db_id' ||
      ' FROM db@' || import_dblink || ',' || idb || '@' || import_dblink ||
      ' WHERE db.db_key = ' || idb || '.db_key)';
   deb('Total db imported = ' || sql%rowcount);

   -- Now import all the dbinc keys
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'dbinc'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where dbinc.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- Set the curr_dbinc_key in db table. We do this using
   -- bulk update.
   OPEN update_c FOR
      --
      -- Extract curr_dbinc_keys from the source database. Basically doing
      --
      -- SELECT (column1, column2, column3)
      --   FROM db@dblink, idb@dblink
      --  WHERE db.db_key = idb.db_key;
      --
      'SELECT ' || from_columns ||
      ' FROM db@' || import_dblink || ',' || idb || '@' || import_dblink ||
      ' WHERE db.db_key = ' || idb || '.db_key';
   FETCH update_c BULK COLLECT INTO currkeys, dbids, dbkeys;
   CLOSE update_c;

   -- Now update all curr_dbinc_key in one shot.
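   -- FORALL sends all the updates to the SQL engine in a single bulk bind
   -- (one round trip for the whole collection) instead of one UPDATE per
   -- row; currkeys/dbkeys were populated by the BULK COLLECT fetch above.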
   FORALL i in 1..dbids.count
      UPDATE db SET curr_dbinc_key = currkeys(i)
       WHERE db.db_key = dbkeys(i);
   deb('Finished registerImportDb');
EXCEPTION
   WHEN dup_val_on_index THEN
      raise_application_error(-20512, 'Database already registered');
END registerImportDb;

--
-- dropTempResource - drop the object and also delete the entry
-- from the tempres table.
--
-- Get a rowlock on the tempres row that has this name. If the
-- lock cannot be obtained, then return. Otherwise, drop the object
-- and delete the row from tempres.
--
PROCEDURE dropTempResource(
   name      IN varchar2
  ,data_type IN varchar2) IS
   dblink_not_found   exception;
   dblink_not_open    exception;
   resource_not_found exception;
   table_not_found    exception;
   pragma exception_init(dblink_not_found,-2024);
   pragma exception_init(dblink_not_open,-2081);
   pragma exception_init(resource_not_found, -20509);
   pragma exception_init(table_not_found, -942);
BEGIN
   deb('Entering dropTempResource name = ' || name ||
       ' , data_type = '|| data_type);

   -- commit in order to release any locks that are held.
   COMMIT;

   -- lock the resource that is about to be dropped
   IF (NOT lockTempResource(name, data_type)) THEN
      deb('Finished dropTempResource - resource busy');
      RETURN;
   END IF;

   IF data_type = 'TABLE' THEN
      EXECUTE IMMEDIATE 'DROP TABLE ' || name;
   ELSIF data_type = 'DBLINK' THEN
      BEGIN
         EXECUTE IMMEDIATE 'ALTER SESSION CLOSE DATABASE LINK ' || name;
      EXCEPTION
         WHEN dblink_not_open THEN
            NULL;
      END;
      EXECUTE IMMEDIATE 'DROP DATABASE LINK ' || name;
   END IF;

   -- remove this temporary resource
   DELETE FROM tempres WHERE tempres.name = dropTempResource.name;
   COMMIT;
   deb('Finished dropTempResource');
EXCEPTION
   WHEN resource_not_found THEN
      DELETE FROM tempres WHERE tempres.name = dropTempResource.name;
      COMMIT;
      deb('Finished dropTempResource - resource_not_found ' || name);
   WHEN dblink_not_found THEN
      deb('Finished dropTempResource - dblink_not_found ' || name);
   WHEN table_not_found THEN
      deb('Finished dropTempResource - table_not_found ' || name);
   WHEN others THEN
      dbms_output.put_line('caught exception during dropTempResource ' ||
                           substr(sqlerrm, 1, 512));
END dropTempResource;

--
-- importGlobalScript - import global script
--
-- Importing a global script is tricky because its db_key is NULL. So,
-- there could be a unique constraint violation when such a name already
-- exists. This function prepends COPY OF (or COPY(copyno) OF) to the
-- name when the unique constraint is violated.
--
PROCEDURE importGlobalScript IS
   type cur_typ is ref cursor;
   global_scr_c    cur_typ;
   global_scr_q    varchar2(1024);
   local           scr%rowtype;
   given_name      scr.scr_name%type;
   from_table      ts_name_list;
   from_columns    varchar2(2048);
   copycnt         number;
   unique_violated exception;
   pragma exception_init(unique_violated, -1);
BEGIN
   -- cursor to get global script information
   from_columns := getColumnName('scr');
   global_scr_q := 'SELECT ' || from_columns ||
                   ' FROM scr@' || import_dblink ||
                   ' WHERE db_key IS NULL';
   OPEN global_scr_c FOR global_scr_q;
   LOOP
      FETCH global_scr_c INTO local.db_key, local.scr_comment,
                              local.scr_key, local.scr_name;
      EXIT WHEN global_scr_c%NOTFOUND;
      copycnt := 0;
      given_name := local.scr_name;
   <<tryagain>>
      BEGIN
         -- Basically doing
         --
         -- INSERT INTO scr(column1, column2,...)
         --   VALUES (db_key, scr_comment, scr_key+offset, scr_name);
         --
         EXECUTE IMMEDIATE 'INSERT INTO scr (' || from_columns ||
            ') VALUES ' || '( null,' ||
            case when local.scr_comment is not null
                 then '''' || local.scr_comment || ''','
                 else 'null,'
            end ||
            local.scr_key || '+' || import_offset || ',' ||
            '''' || local.scr_name || ''')';
      EXCEPTION
         WHEN unique_violated THEN
            -- Now change the name to COPY OF and retry
            copycnt := copycnt + 1;
            IF (copycnt = 1) THEN
               local.scr_name := 'COPY OF ' || given_name;
            ELSE
               local.scr_name := 'COPY(' || copycnt || ') OF ' || given_name;
            END IF;
            goto tryagain;
      END;
   END LOOP;

   -- Import global script text
   from_table.delete;
   from_table(1) := 'scr';
   importTable(table_name   => 'scrl'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where scr.db_key is null' ||
                               ' and scr.scr_key = scrl.scr_key');
END importGlobalScript;

/*------------------------------------------------*
 * Public functions for import catalog processing *
 *------------------------------------------------*/

--
-- createTempResource - create temporary resource
--
-- Inserts the given name and data_type into the tempres table.
--
PROCEDURE createTempResource(
   name      IN varchar2
  ,data_type IN varchar2) IS
   unique_violated exception;
   pragma exception_init(unique_violated ,-1);
BEGIN
   -- add this temporary resource
   INSERT INTO tempres (name, data_type) VALUES (name, data_type);
   COMMIT;
EXCEPTION
   WHEN unique_violated THEN
      raise_application_error(-20508, 'resource already in use ' || name);
END createTempResource;

--
-- lockTempResource - lock temporary resource
--
-- Obtain a rowlock on the tempres row for the given name. Return TRUE
-- if the object is found and the lock is obtained. Otherwise, FALSE.
--
FUNCTION lockTempResource(
   name      IN varchar2
  ,data_type IN varchar2)
RETURN BOOLEAN IS
   local         tempres%ROWTYPE;
   found         number;
   resource_busy exception;
   pragma exception_init(resource_busy, -54);
BEGIN
   deb('Entering lockTempResource ' || name);
   SELECT name, data_type INTO local.name, local.data_type
     FROM tempres
    WHERE name = lockTempResource.name FOR UPDATE NOWAIT;

   IF (data_type = 'TABLE') THEN
      SELECT count(*) INTO found FROM user_tab_columns
       WHERE table_name = lockTempResource.name;
   ELSIF (data_type = 'DBLINK') THEN
      SELECT count(*) INTO found FROM user_db_links
       WHERE db_link = lockTempResource.name;
   ELSE
      raise_application_error(-20999,
         'Internal error in lockTempResource(): bad data_type '|| data_type);
   END IF;

   IF (found = 0) THEN
      deb('Finished lockTempResource with resource not found');
      raise_application_error(-20509, 'resource not found ' || name);
   END IF;
   deb('Finished lockTempResource');
   RETURN TRUE;
EXCEPTION
   WHEN resource_busy THEN
      deb('Finished lockTempResource with resource_busy');
      RETURN FALSE;
   WHEN no_data_found THEN
      deb('Finished lockTempResource with no_data_found');
      RETURN FALSE;
END lockTempResource;

--
-- cleanupTempResource - cleanup temporary resources
--
-- drop all objects listed in the tempres table and delete their rows
-- as well.
--
PROCEDURE cleanupTempResource IS
   CURSOR temp_c IS SELECT name, data_type FROM tempres;
BEGIN
   FOR tempRec in temp_c LOOP
      dropTempResource(tempRec.name, tempRec.data_type);
   END LOOP;
END cleanupTempResource;

--
-- addDbidToImport - add the dbid to the idb and idbinc tables whose
-- information has to be imported.
--
-- This is executed on the source catalog database. Obtain the lock on
-- the temporary resource that was allocated for idb and idbinc, then
-- add the dbid to the idb and idbinc table.
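--
-- Example (hypothetical values, in the style of the importTable example
-- above): the temporary idb/idbinc tables are created beforehand via
-- createTempResource; the first call seeds the list and later calls
-- append to it. The names 'RMAN$IDB'/'RMAN$IDBINC' below are made up
-- for illustration only.
--
--    addDbidToImport(first  => 1
--                   ,idb    => 'RMAN$IDB'
--                   ,idbinc => 'RMAN$IDBINC'
--                   ,dbname => 'PROD');
--    addDbidToImport(first  => 0
--                   ,idb    => 'RMAN$IDB'
--                   ,idbinc => 'RMAN$IDBINC'
--                   ,dbid   => 1234567890);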
--
PROCEDURE addDbidToImport(
   first  IN binary_integer
  ,idb    IN varchar2
  ,idbinc IN varchar2
  ,dbid   IN number   DEFAULT NULL
  ,dbname IN varchar2 DEFAULT NULL) IS
   dummy       tempres.name%TYPE;
   ldbid       db.db_id%TYPE := dbid;
   dbid_exists number;
BEGIN
   -- lock the resource whose content is about to be changed
   IF (NOT lockTempResource(idb, 'TABLE')) THEN
      raise_application_error(-20508, 'resource already in use ' || idb);
   ELSIF (NOT lockTempResource(idbinc, 'TABLE')) THEN
      raise_application_error(-20508, 'resource already in use ' || idbinc);
   END IF;

   IF (dbid IS NULL AND dbname IS NULL) THEN
      EXECUTE IMMEDIATE 'INSERT INTO ' || idb || '(db_key, db_id) ' ||
         '(SELECT db_key, db_id FROM db)';
      IF (sql%rowcount = 0) THEN
         raise_application_error(-20510, 'import database not found');
      END IF;
      EXECUTE IMMEDIATE 'INSERT INTO ' || idbinc || '(dbinc_key) ' ||
         '(SELECT dbinc_key ' ||
         ' FROM dbinc, ' || idb ||
         ' WHERE dbinc.db_key = ' || idb ||'.db_key)';
      COMMIT;
      RETURN;
   ELSIF (dbname IS NOT NULL) THEN
      BEGIN
         SELECT db.db_id INTO ldbid FROM db, dbinc
          WHERE db.curr_dbinc_key = dbinc.dbinc_key
            AND dbinc.db_name = upper(addDbidtoImport.dbname);
      EXCEPTION
         WHEN no_data_found THEN
            raise_application_error(-20510, 'import database not found');
         WHEN too_many_rows THEN
            raise_application_error(-20511,
               'import database name is ambiguous');
      END;
   ELSE
      BEGIN
         SELECT db.db_id INTO ldbid FROM db WHERE db.db_id = ldbid;
      EXCEPTION
         WHEN no_data_found THEN
            raise_application_error(-20510, 'import database not found');
      END;
   END IF;

   -- check whether all dbids that were previously added exist in the idb
   -- table. Otherwise, some other process reused our table name.
   IF (first = 0) THEN
      FOR i in 1..import_dbid.count LOOP
         EXECUTE IMMEDIATE 'SELECT count(*) FROM ' || idb || ' WHERE ' ||
            idb || '.db_id =' || import_dbid(i) INTO dbid_exists;
         IF (dbid_exists != 1) THEN
            raise_application_error(-20508,
               'resource already in use ' || idb);
         END IF;
      END LOOP;
      EXECUTE IMMEDIATE 'SELECT count(*) FROM ' || idb INTO dbid_exists;
      IF (dbid_exists != import_dbid.count) THEN
         raise_application_error(-20508, 'resource already in use ' || idb);
      END IF;
      import_dbid(import_dbid.count + 1) := ldbid;
   ELSE
      import_dbid.delete;
      import_dbid(1) := ldbid;
   END IF;

   EXECUTE IMMEDIATE 'INSERT INTO ' || idb || '(db_key, db_id) ' ||
      '(SELECT db_key, db_id FROM db ' ||
      ' WHERE db_id = ' || ldbid || ')';
   EXECUTE IMMEDIATE 'INSERT INTO ' || idbinc || '(dbinc_key) ' ||
      '(SELECT dbinc_key ' ||
      ' FROM dbinc, ' || idb ||
      ' WHERE dbinc.db_key = ' || idb || '.db_key ' ||
      ' AND ' || idb || '.db_id = ' || ldbid || ')';
   COMMIT;

   -- lock the resource table once again
   IF (NOT lockTempResource(idb, 'TABLE')) THEN
      raise_application_error(-20508, 'resource already in use ' || idb);
   ELSIF (NOT lockTempResource(idbinc, 'TABLE')) THEN
      raise_application_error(-20508, 'resource already in use ' || idbinc);
   END IF;
END addDbidToImport;

--
-- lockDbidToImport - lock the db that has to be imported on the source db
--
-- This is executed on the source recovery catalog database. Basically,
-- hold the rowlock on the db table so that no resync happens during
-- import.
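--
-- Example (continuing the hypothetical names above):
--
--    lockDbidToImport(idb => 'RMAN$IDB');
--
-- This holds a row lock on the db row of every database staged in
-- 'RMAN$IDB' until the importing session commits or rolls back.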
-- PROCEDURE lockDbidToImport( idb IN varchar2) IS TYPE cur_typ IS ref CURSOR; idb_c cur_typ; idb_q varchar2(512); local_db_key db.db_key%TYPE; local_db_id db.db_key%TYPE; BEGIN idb_q := 'SELECT db_key FROM ' || idb; OPEN idb_c FOR idb_q; LOOP FETCH idb_c INTO local_db_key; EXIT WHEN idb_c%NOTFOUND; SELECT db_id INTO local_db_id FROM db WHERE db.db_key = local_db_key FOR UPDATE; END LOOP; END lockDbidToImport; -- -- importSchema - import catalog schema -- -- main routine for IMPORT CATALOG command. Goes through each catalog -- table and imports its information. -- PROCEDURE importSchema( dblink IN varchar2 ,idb IN varchar2 ,idbinc IN varchar2) IS from_table ts_name_list; BEGIN import_dblink := dblink; adjustRmanSeq; registerImportDb(idb, idbinc); -- import rman configuration from_table.delete; from_table(1) := idb; importTable(table_name => 'conf' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where conf.db_key = ' || idb || '.db_key'); -- import node from_table.delete; from_table(1) := idb; importTable(table_name => 'node' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where node.db_key = ' || idb || '.db_key'); -- import checkpoint from_table.delete; from_table(1) := idbinc; importTable(table_name => 'ckp' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where ckp.dbinc_key = ' || idbinc || '.dbinc_key'); -- import tablespace from_table.delete; from_table(1) := idbinc; importTable(table_name => 'ts' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where ts.dbinc_key = ' || idbinc || '.dbinc_key'); -- import tablespace attribute from_table.delete; from_table(1) := idbinc; importTable(table_name => 'tsatt' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where tsatt.dbinc_key = ' || idbinc || '.dbinc_key'); -- import datafile from_table.delete; from_table(1) := idbinc; importTable(table_name => 'df' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where df.dbinc_key = ' || idbinc || '.dbinc_key'); -- import datafile attribute from_table.delete; from_table(1) := idbinc; from_table(2) := 'df'; importTable(table_name => 'site_dfatt' ,from_table => from_table ,uniq_rows => TRUE ,where_clause => 'where df.dbinc_key = ' || idbinc || '.dbinc_key' || ' and site_dfatt.df_key = df.df_key'); -- import offline range from_table.delete; from_table(1) := idbinc; importTable(table_name => 'offr' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where offr.dbinc_key = ' || idbinc || '.dbinc_key'); -- import tempfile from_table.delete; from_table(1) := idbinc; importTable(table_name => 'tf' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where tf.dbinc_key = ' || idbinc || '.dbinc_key'); -- import tempfile attribute from_table.delete; from_table(1) := idbinc; from_table(2) := 'tf'; importTable(table_name => 'site_tfatt' ,from_table => from_table ,uniq_rows => TRUE ,where_clause => 'where tf.dbinc_key = ' || idbinc || '.dbinc_key' || ' and site_tfatt.tf_key = tf.tf_key'); -- import redo range from_table.delete; from_table(1) := idbinc; importTable(table_name => 'rr' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where rr.dbinc_key = ' || idbinc || '.dbinc_key'); -- import redo thread from_table.delete; from_table(1) := idbinc; importTable(table_name => 'rt' ,from_table => from_table ,uniq_rows => FALSE ,where_clause => 'where rt.dbinc_key = ' || idbinc || '.dbinc_key'); -- import online redo log from_table.delete; from_table(1) := idbinc; importTable(table_name => 'orl' 
               ,from_table   => from_table
               ,uniq_rows    => FALSE
               ,where_clause => 'where orl.dbinc_key = ' || idbinc ||
                                '.dbinc_key');

   -- import redo log history
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'rlh'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where rlh.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import archived log
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'al'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where al.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import backupset
   from_table.delete;
   from_table(1) := idb;
   importTable(table_name   => 'bs'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bs.db_key = ' || idb || '.db_key');

   -- import backuppiece
   from_table.delete;
   from_table(1) := idb;
   importTable(table_name   => 'bp'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bp.db_key = ' || idb || '.db_key');

   -- import backup controlfile
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'bcf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bcf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import copy controlfile
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'ccf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where ccf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import proxy controlfile
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'xcf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where xcf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import backup spfile
   from_table.delete;
   from_table(1) := idb;
   importTable(table_name   => 'bsf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bsf.db_key = ' || idb || '.db_key');

   -- import backup datafile
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'bdf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bdf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import datafile copy
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'cdf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where cdf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import proxy datafile
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'xdf'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where xdf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import proxy archivelog
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'xal'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where xal.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import backup redolog
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'brl'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where brl.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import backup corruption
   from_table.delete;
   from_table(1) := idbinc;
   from_table(2) := 'bdf';
   importTable(table_name   => 'bcb'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bdf.bdf_key = bcb.bdf_key' ||
                               ' and bdf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import copy corruption
   from_table.delete;
   from_table(1) := idbinc;
   from_table(2) := 'cdf';
   importTable(table_name   => 'ccb'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where cdf.cdf_key = ccb.cdf_key' ||
                               ' and cdf.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import rman status row
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name
                             => 'rsr'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where rsr.dbinc_key = ' || idbinc ||
                               '.dbinc_key');

   -- import stored script
   from_table.delete;
   from_table(1) := idb;
   importTable(table_name   => 'scr'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where scr.db_key = ' || idb || '.db_key');

   -- import stored script line
   from_table.delete;
   from_table(1) := idb;
   from_table(2) := 'scr';
   importTable(table_name   => 'scrl'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where scr.db_key = ' || idb || '.db_key' ||
                               ' and scr.scr_key = scrl.scr_key');

   -- import global script
   importGlobalScript;

   -- import rman output
   from_table.delete;
   from_table(1) := idb;
   importTable(table_name   => 'rout'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where rout.db_key = ' || idb || '.db_key');

   -- Ignore importing the config table because it is obsolete. Its
   -- values aren't interpreted by the rman command; it exists only so
   -- that the configure compatible command doesn't throw an error in
   -- 8.1.6 or lower.

   -- import flashback
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'fb'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where fb.dbinc_key = '|| idbinc ||
                               '.dbinc_key');

   -- import grsp
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'grsp'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where grsp.dbinc_key = '|| idbinc ||
                               '.dbinc_key');

   -- import bcr
   from_table.delete;
   from_table(1) := idb;
   from_table(2) := 'node';
   importTable(table_name   => 'bcr'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where bcr.site_key = node.site_key' ||
                               ' and node.db_key = ' || idb || '.db_key');

   -- import nrsp
   from_table.delete;
   from_table(1) := idbinc;
   importTable(table_name   => 'nrsp'
              ,from_table   => from_table
              ,uniq_rows    => FALSE
              ,where_clause => 'where nrsp.dbinc_key = '|| idbinc ||
                               '.dbinc_key');

   --
   -- Add your new catalog table here
   --
   -- Example
   -- If there is a table named foo with columns (db_key, myname), then
   -- you have to import it by joining idb using db_key with idb.db_key.
   -- So, the importTable usage will look like.
   --
   --    from_table.delete;
   --    from_table(1) := idb;
   --    importTable(table_name   => 'foo'
   --               ,from_table   => from_table
   --               ,uniq_rows    => FALSE
   --               ,where_clause => 'where foo.db_key = ' || idb ||
   --                                '.db_key'
   --               );
   --
   COMMIT;
EXCEPTION
   WHEN others THEN
      ROLLBACK;
      RAISE;
END importSchema;

--
-- unregisterDatabase - another flavor of unregister database
--
-- Given the idb table, unregister all the databases from the source
-- recovery catalog database.
--
PROCEDURE unregisterDatabase(
   idb IN varchar2) IS
   TYPE cur_typ IS ref CURSOR;
   idb_c       cur_typ;
   idb_q       varchar2(512);
   local_db_id db.db_id%TYPE;
BEGIN
   idb_q := 'SELECT db_id FROM ' || idb;
   OPEN idb_c FOR idb_q;
   LOOP
      FETCH idb_c INTO local_db_id;
      EXIT WHEN idb_c%NOTFOUND;
      unregisterDatabase(db_id => local_db_id);
   END LOOP;
END unregisterDatabase;

/*----------------------------*
 * Virtual Private Catalog    *
 *----------------------------*/

-- Note that the "userid" token in the names and variables
-- of the next few procedures refers to the user's name, not
-- the numeric uid as the token would imply. That is a historical
-- error that we are not going to rectify during the fix for bug
-- 12400752 because it would require renaming these procedures, which
-- could cause compatibility issues when older versions of RMAN use
-- the newer version of the recovery catalog.
-- -- Where we need to refer to the numeric userid here, we will use the -- name "numeric_uid". FUNCTION grant_get_dbid(dbname IN varchar2) RETURN number IS dbid number; cnt number; BEGIN select max(db_id), count(*) into dbid, cnt from db join (select distinct db_key,db_name from dbinc) using(db_key) where db_name = dbname; if cnt = 0 then raise_application_error(-20018, 'database ' || dbname || ' not found in recovery catalog'); end if; if cnt > 1 then raise_application_error(-20019, 'database ' || dbname || ' not unique in recovery catalog'); end if; return dbid; END; FUNCTION get_db_numeric_uid(uname IN varchar2) RETURN number IS numeric_uid number; BEGIN select user_id into numeric_uid from all_users where username = uname; return numeric_uid; EXCEPTION WHEN no_data_found THEN raise_application_error(-20022, 'user ' || uname || ' not found'); WHEN OTHERS THEN RAISE; END; PROCEDURE clean_old_uids IS BEGIN delete from vpc_databases where filter_uid not in (select user_id from all_users); delete from vpc_users where filter_uid not in (select user_id from all_users); -- No commit is done here because this is always called at the -- beginning of a grant or revoke procedure that will do its own commit. END; PROCEDURE revoke_clean_userid(userid IN varchar2) IS BEGIN delete from vpc_users where filter_user=userid and add_new_db='N' and not exists (select * from vpc_databases where filter_user=userid); END; PROCEDURE grant_catalog(userid IN varchar2, dbname IN varchar2) IS BEGIN grant_catalog(userid, grant_get_dbid(dbname)); END; PROCEDURE grant_catalog(userid IN varchar2, dbid IN number) IS user_count number; numeric_uid number; BEGIN clean_old_uids; select count(*), max(filter_uid) into user_count, numeric_uid from vpc_users where filter_user = userid; if user_count = 0 then numeric_uid := get_db_numeric_uid(userid); insert into vpc_users(filter_user, filter_uid, add_new_db) values(userid, numeric_uid, 'N'); end if; insert into vpc_databases(filter_user, filter_uid, db_id) select userid, numeric_uid, dbid from dual where not exists (select 1 from vpc_databases where filter_user = userid and db_id = dbid); commit; END; PROCEDURE grant_register(userid IN varchar2) IS numeric_uid number := get_db_numeric_uid(userid); BEGIN clean_old_uids; merge into vpc_users using dual on (filter_user=userid) when matched then update set add_new_db = 'Y' when not matched then insert(filter_user, filter_uid, add_new_db) values(userid, numeric_uid, 'Y'); commit; END; PROCEDURE revoke_catalog(userid IN varchar2, dbname IN varchar2) IS BEGIN revoke_catalog(userid, grant_get_dbid(dbname)); END; PROCEDURE revoke_catalog(userid IN varchar2, dbid IN number) IS BEGIN clean_old_uids; delete from vpc_databases where filter_user = userid and db_id = dbid; revoke_clean_userid(userid); commit; END; PROCEDURE revoke_register(userid IN varchar2) IS BEGIN clean_old_uids; update vpc_users set add_new_db='N' where filter_user=userid; revoke_clean_userid(userid); commit; END; PROCEDURE revoke_all(userid IN varchar2) IS BEGIN clean_old_uids; delete from vpc_databases where filter_user = userid; delete from vpc_users where filter_user = userid; commit; END; PROCEDURE vpc_run_sql(cre in boolean) IS type ct is ref cursor; c ct; stmt_type varchar2(1) := 'D'; stmt_sql long; begin if cre then stmt_type := 'C'; end if; open c for 'select stmt_sql from ' || dbms_catowner || '.cfs_v where stmt_type = ''' ||stmt_type|| ''' order by stmt_number'; loop fetch c into stmt_sql; exit when c%notfound; begin execute immediate stmt_sql; exception 
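            -- ORA-942 (table or view does not exist) is tolerated only when
            -- dropping (cre = false), since a partially created virtual
            -- private catalog may be missing some of the objects being
            -- dropped; during creation every error is re-raised.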
when others then if sqlcode != -942 or cre then raise; end if; end; end loop; close c; end vpc_run_sql; PROCEDURE create_virtual_catalog IS count1 number; type ct is ref cursor; c ct; begin open c for 'select count(*) from ' || dbms_catowner || '.vpc_users_v'; fetch c into count1; close c; if count1 = 0 then raise_application_error(-20015, 'Not authorized to share this catalog'); end if; begin select count(*) into count1 from user_role_privs where default_role = 'YES' and granted_role = 'RECOVERY_CATALOG_OWNER'; if count1 = 0 then raise_application_error(-20014, 'Must be granted RECOVERY_CATALOG_OWNER role'); end if; exception when others then if sqlcode != -942 then raise; end if; end; execute immediate 'update ' || dbms_catowner || '.vpc_users_v set version = null'; commit; vpc_run_sql(true); update vpc_users set version = (select max(version) from rcver); commit; end create_virtual_catalog; PROCEDURE drop_virtual_catalog IS begin vpc_run_sql(false); end drop_virtual_catalog; PROCEDURE dumpPkgState (msg in varchar2 default NULL) IS begin deb('Global variables package state ' || nvl(msg,' ')); deb('Number variables'); deb('dbglvl: ' || nvl(to_char(dbglvl), 'NULL')); deb('this_db_key: ' || nvl(to_char(this_db_key), 'NULL')); deb('this_dbinc_key: ' || nvl(to_char(this_dbinc_key), 'NULL')); deb('this_ckp_key: ' || nvl(to_char(this_ckp_key), 'NULL')); deb('this_ckp_scn: ' || nvl(to_char(this_ckp_scn), 'NULL')); deb('this_site_key: ' || nvl(to_char(this_site_key), 'NULL')); deb('logs_shared: ' || nvl(to_char(logs_shared), 'NULL')); deb('disk_backups_shared: ' || nvl(to_char(disk_backups_shared), 'NULL')); deb('tape_backups_shared: ' || nvl(to_char(tape_backups_shared), 'NULL')); deb('reNorm_state: ' || nvl(to_char(reNorm_state), 'NULL')); deb('resync_reason: ' || nvl(to_char(resync_reason), 'NULL')); deb('scr_key: ' || nvl(to_char(scr_key), 'NULL')); deb('scr_line: ' || nvl(to_char(scr_line), 'NULL')); deb('kccdivts: ' || nvl(to_char(kccdivts), 'NULL')); deb('cntbs: ' || nvl(to_char(cntbs), 'NULL')); deb('last_full_ckp_scn: ' || nvl(to_char(last_full_ckp_scn), 'NULL')); deb('last_ts#: ' || nvl(to_char(last_ts#), 'NULL')); deb('last_file#: ' || nvl(to_char(last_file#), 'NULL')); deb('last_thread#: ' || nvl(to_char(last_thread#), 'NULL')); deb('last_ts_recid: ' || nvl(to_char(last_ts_recid), 'NULL')); deb('last_df_recid: ' || nvl(to_char(last_df_recid), 'NULL')); deb('last_tf_recid: ' || nvl(to_char(last_tf_recid), 'NULL')); deb('last_rt_recid: ' || nvl(to_char(last_rt_recid), 'NULL')); deb('last_orl_recid: ' || nvl(to_char(last_orl_recid), 'NULL')); deb('last_conf_recid: ' || nvl(to_char(last_conf_recid), 'NULL')); deb('force_resync2cf: ' || nvl(to_char(force_resync2cf), 'NULL')); deb('last_rlh_recid: ' || nvl(to_char(last_rlh_recid), 'NULL')); deb('last_al_recid: ' || nvl(to_char(last_al_recid), 'NULL')); deb('last_offr_recid: ' || nvl(to_char(last_offr_recid), 'NULL')); deb('last_bs_recid: ' || nvl(to_char(last_bs_recid), 'NULL')); deb('last_bp_recid: ' || nvl(to_char(last_bp_recid), 'NULL')); deb('last_bdf_recid: ' || nvl(to_char(last_bdf_recid), 'NULL')); deb('last_bsf_recid: ' || nvl(to_char(last_bsf_recid), 'NULL')); deb('last_brl_recid: ' || nvl(to_char(last_brl_recid), 'NULL')); deb('last_cdf_recid: ' || nvl(to_char(last_cdf_recid), 'NULL')); deb('last_bcb_recid: ' || nvl(to_char(last_bcb_recid), 'NULL')); deb('last_ccb_recid: ' || nvl(to_char(last_ccb_recid), 'NULL')); deb('last_do_recid: ' || nvl(to_char(last_do_recid), 'NULL')); deb('last_xdf_recid: ' || nvl(to_char(last_xdf_recid), 
                                       'NULL'));
   deb('last_xal_recid: ' || nvl(to_char(last_xal_recid), 'NULL'));
   deb('last_rsr_recid: ' || nvl(to_char(last_rsr_recid), 'NULL'));
   deb('last_rout_stamp: ' || nvl(to_char(last_rout_stamp), 'NULL'));
   deb('last_inst_startup_stamp: ' ||
       nvl(to_char(last_inst_startup_stamp), 'NULL'));
   deb('lrsr_key: ' || nvl(to_char(lrsr_key), 'NULL'));
   deb('lrout_skey: ' || nvl(to_char(lrout_skey), 'NULL'));
   deb('lsession_recid: ' || nvl(to_char(lsession_recid), 'NULL'));
   deb('lsession_stamp: ' || nvl(to_char(lsession_stamp), 'NULL'));
   deb('lrman_status_recid: ' || nvl(to_char(lrman_status_recid), 'NULL'));
   deb('lrman_status_stamp: ' || nvl(to_char(lrman_status_stamp), 'NULL'));
   deb('krbmror_llength_bytes: ' ||
       nvl(to_char(krbmror_llength_bytes), 'NULL'));
   deb('last_ic_recid: ' || nvl(to_char(last_ic_recid), 'NULL'));
   deb('last_reset_scn: ' || nvl(to_char(last_reset_scn), 'NULL'));
   deb('last_dbinc_key: ' || nvl(to_char(last_dbinc_key), 'NULL'));
   deb('low_nrsp_recid: ' || nvl(to_char(low_nrsp_recid), 'NULL'));
   deb('last_nrsp_recid: ' || nvl(to_char(last_nrsp_recid), 'NULL'));
   deb('last_grsp_recid: ' || nvl(to_char(last_grsp_recid), 'NULL'));
   deb('last_bcr_recid: ' || nvl(to_char(last_bcr_recid), 'NULL'));
   deb('last_resync_cksum: ' || nvl(to_char(last_resync_cksum), 'NULL'));
   deb('Date variables');
   deb('this_ckp_time: ' ||
       nvl(to_char(this_ckp_time, 'DD/MM/YYYY HH24:MI:SS'), 'NULL'));
   deb('last_reset_time: ' ||
       nvl(to_char(last_reset_time, 'DD/MM/YYYY HH24:MI:SS'), 'NULL'));
   deb('last_cf_version_time: ' ||
       nvl(to_char(last_cf_version_time, 'DD/MM/YYYY HH24:MI:SS'), 'NULL'));
   deb('Char variables');
   deb('last_fname: ' || nvl(last_fname, 'NULL'));
   deb('last_rspname: '|| nvl(last_rspname, 'NULL'));
   deb('this_cf_type: '|| nvl(this_cf_type, 'NULL'));
   deb('this_db_unique_name: ' || nvl(this_db_unique_name, 'NULL'));
   deb('Boolean variables');
   if debug is NULL then
      deb('debug is NULL');
   elsif debug then
      deb('debug is TRUE');
   else
      deb('debug is FALSE');
   end if;
   if client_site_aware is NULL then
      deb('client_site_aware is NULL');
   elsif client_site_aware then
      deb('client_site_aware is TRUE');
   else
      deb('client_site_aware is FALSE');
   end if;
   if scr_glob is NULL then
      deb('scr_glob is NULL');
   elsif scr_glob then
      deb('scr_glob is TRUE');
   else
      deb('scr_glob is FALSE');
   end if;
   if do_temp_ts_resync is NULL then
      deb('do_temp_ts_resync is NULL');
   elsif do_temp_ts_resync then
      deb('do_temp_ts_resync is TRUE');
   else
      deb('do_temp_ts_resync is FALSE');
   end if;
end dumpPkgState;

/*--------------------------------------------------*
 * Package Instantiation: Initialize Package State  *
 *--------------------------------------------------*/

BEGIN
  tsRec.ts#   := NULL;  -- not in TableSpaceResync
  dfRec.file# := NULL;  -- not in middle of dfResync
  version_list(1) := '08.00.04.00';
-- In 8.0.5 the following changes were made:
-- 1. Allow null for fname and blocks in checkDatafile. This was
--    done for bug 612344, which had to do with datafiles that are
--    MISSING or UNNAMED (KCCFECKD bit set). In these cases, the
--    controlfile does not contain a valid filename, which also implies
--    that the filesize in the file header cannot be obtained either.
  version_list(2) := '08.00.05.00';
-- In 8.1.3 the following changes were made:
-- 1. Added 'X' (expired) backup piece status.
  version_list(3) := '08.01.03.00';
-- In 8.1.6 the following changes were made:
-- 1) add stopTime to checkDatafile()
  version_list(4) := '08.01.06.00';
-- In 8.1.7 the following changes were made:
-- 1) Add controlfile_type to bcf
-- 2) Add controlfile_included to bs
-- 3) Add controlfile_type to ccf
-- 4) Add controlfile_type to xcf
-- 5) Add is_standby to al
-- 6) Add input_file_scan_only to bs
  version_list(5) := '08.01.07.00';
-- In 9.0.0 the following changes were made:
-- 1) add setConfig(), deleteConfig(), resetConfig(),
--    beginConfigResync() and endConfigResync().
-- 2) Added 'X' (expired) status for CC, DC, AL objects.
-- 3) Added blocks to bcf
  version_list(6) := '09.00.00.00';
-- In 9.2.0 the following changes were made:
-- 1) add beginBackupSpFileResync, addBackupSpFile, checkBackupSpFile,
--    endBackupSpFileResync
-- 2) add beginIncarnationResync, endIncarnationResync, checkIncarnation
-- 3) Never do a full resync when controlfile is not CURRENT and
--    do not update high water marks.
  version_list(7) := '09.02.00.00';
-- In 10.0 the following changes were made:
-- 1) add beginRmanStatusResync, checkRmanStatus, endRmanStatusResync.
-- 2) add setConfig2(), beginConfigResync2(), and endConfigResync2().
  version_list(8) := '10.01.00.00';
-- 10gR2 version
  version_list(9)  := '10.02.00.00';
  version_list(10) := '10.02.00.01';
-- 11gR1 version
  version_list(11) := '11.01.00.00';
  version_list(12) := '11.01.00.01';
  version_list(13) := '11.01.00.02';
  version_list(14) := '11.01.00.03';
  version_list(15) := '11.01.00.04';
  version_list(16) := '11.01.00.05';
  version_list(17) := '11.01.00.06';
  version_list(18) := '11.01.00.07';
  version_list(19) := '11.02.00.00';
  version_list(20) := '11.02.00.01';
  version_list(21) := '11.02.00.02';
  version_list(22) := '11.02.00.03';
  version_max_index := 22;
END dbms_rcvcat;

>>> define prvtrmnu_plb <<<
-- Copyright (c) 2006, 2011, Oracle and/or its affiliates.
-- All rights reserved.
--
CREATE OR REPLACE PACKAGE BODY dbms_rcvman IS
--
-- NAME
--   prvtrmnu.sql - Recovery MANager package body
--
-- DESCRIPTION
--   This package contains procedures for querying information that
--   Recovery Manager needs from the recovery catalog or the target
--   database control file.
--
--   This is the version used by catrman.sql; there is a separate
--   body in the file prvtrmns.pls that is loaded into SYS; this one
--   is loaded into a user schema.
--
-- NOTES
--   Remember to make corresponding changes in prvtrmns.pls.
--   But also note that there is public common code here for both files,
--   between BEGIN_PUBCOMMON_RCVMAN_CODE and END_PUBCOMMON_RCVMAN_CODE and
--   private common code between BEGIN_PRVCOMMON_RCVMAN_CODE and
--   END_PRVCOMMON_RCVMAN_CODE.
-- -- MODIFIED (MM/DD/YY) -- kgiyer 02/24/11 - Backport kgiyer_bug-10377075 from main -- fsanchez 02/07/11 - Backport fsanchez_bug-7293136 from main -- jkrismer 01/05/11 - backport 9439973 from main -- molagapp 11/24/10 - Backport 10303221: basebug 9476155 -- molagapp 11/24/10 - Backport 10315698: basebug 10264638 -- fsanchez 11/24/10 - Backport fsanchez_b9764019_x64 from main -- molagapp 11/29/10 - bump up version to 11.2.0.3 -- molagapp 06/03/10 - bug-9712461 -- jkrismer 04/27/10 - bug-9431112 -- molagapp 04/10/10 - bug-8926194 -- fsanchez 04/12/10 - bug 9550354 -- fsanchez 03/18/10 - bug 9308761 -- fsanchez 03/07/10 - bug 8554110 - fix setUntilLog -- raguzman 02/19/10 - ListBackup: KEEP backups with no datafiles -- fsanchez 01/11/10 - bug 9044053 -- molagapp 12/09/09 - bug 9190517 -- molagapp 07/15/09 - bump up version to 11.2.0.2 -- molagapp 07/14/09 - bug-8686040 -- raguzman 05/27/09 - bug-8482844 getIncrementalSCN keep default -- molagapp 05/20/09 - bug 8319946 -- molagapp 04/29/09 - bump up version to 11.2.0.1 -- molagapp 03/05/09 - bug-7396077 -- molagapp 03/27/09 - bug 5739423 -- fsanchez 03/14/09 - bug 4381732 - use escape in like for datafiles -- raguzman 02/10/09 - findSpfileBackup needs to retain 10.2 signature -- molagapp 02/05/09 - bug 7572548 -- fsanchez 12/01/08 - bug 7609598 -- banand 11/30/08 - bug 7581672 -- molagapp 10/08/08 - bug-5554609 -- molagapp 08/25/08 - lrg_3533498 -- banand 08/08/08 - bug-6459358 -- banand 07/08/08 - bug-7117200 -- molagapp 07/22/08 - bug 7268955 -- banand 07/08/08 - bug-7117200 -- banand 05/29/08 - bug 7138218 -- molagapp 11/07/07 - bug-6616834 -- fsanchez 09/14/06 - project 2042 -- jkrismer 12/21/07 - bug-6658764 add "cmd is null OR" condition in all -- places where cmd != 'B', fix pre-11g delete cmd -- raguzman 01/02/08 - Fix spfile computeTime -- raguzman 12/07/07 - KEEP controlfile restore via restore point fix -- jciminsk 10/22/07 - Upgrade support for 11.2 -- jciminsk 10/08/07 - version to 11.2.0.0.0 -- molagapp 10/03/07 - bug-6138791 -- wfisher 10/02/07 - Allowing incarnation on archivelog specifiers -- molagapp 09/24/07 - bug-6402384 -- jciminsk 08/03/07 - version to 11.1.0.7.0 -- raguzman 06/13/07 - KEEP backups should not be counted for redundancy -- molagapp 06/05/07 - bump up version to 11.1.0.6 -- raguzman 05/10/07 - backupHistory cursors, check for keep in CDF/XDF -- molagapp 06/06/07 - add isArchivedLogMissing -- banand 05/19/07 - initialise is_standby in DbUniqueNameIsStandby -- banand 05/18/07 - dont return non-EOR log -- banand 05/05/07 - set this_site_key when client is site aware -- banand 05/05/07 - set client_site_aware when client says so -- molagapp 04/22/07 - bug 6014000 -- banand 04/05/07 - variables to control log, disk/tape backup sharing -- molagapp 04/18/07 - honor allIncarnation for getArchivedNextSCN -- molagapp 04/18/07 - bump up version to 11.1.0.5 -- molagapp 04/02/07 - bug-5936294 -- raguzman 03/26/07 - Incr keep backups are not being retained. 
-- raguzman 03/21/07 - Incr L1 matches keep-tag/nokeep attributes -- for start scn -- molagapp 03/18/07 - bug-5905781 -- molagapp 03/09/07 - bug-5902410 -- raguzman 02/23/07 - Add getPrimaryDfName routine -- banand 03/10/07 - lrg 2889531 -- swerthei 01/24/07 - fix rc_backup_set.multi_section -- molagapp 02/28/07 - rename remote archived log to foreign archived log -- banand 02/18/07 - bug-5881248 -- molagapp 02/14/07 - bump up version to 11.1.0.4 -- banand 01/29/07 - primary before standby in list command -- banand 02/23/07 - skip logs from not required branch during recovery -- molagapp 01/28/07 - bug-5870927 -- banand 11/28/06 - bug 5620103 -- banand 11/25/06 - bug 5647645 -- molagapp 11/19/06 - bug-5620640 -- banand 11/19/06 - delete obsolete must respect shipped/backed option -- molagapp 10/25/06 - add getArchivedNextSCN -- molagapp 11/01/06 - bump up version to 11.1.0.3 -- swerthei 09/21/06 - add section size to rcvrec_t -- banand 09/10/06 - bug 5441981 -- molagapp 10/04/06 - bump up version to 11.1.0.2 -- molagapp 09/15/06 - bump version to 11.1.0.1 -- raguzman 08/02/06 - Fix listGetRestorePoint, for guaranteed -- molagapp 07/13/06 - bug-5382453 -- banand 07/13/06 - fix order by clause in translateArcLogSCNRange2 -- raguzman 06/08/06 - Add LIST RESTORE POINT -- raguzman 05/18/06 - Get restore point -- raguzman 05/15/06 - Backups with keep are seperate from non-keep -- raguzman 04/04/06 - KEEP Archivelog backups long term -- molagapp 05/29/06 - improve block corruption project -- swerthei 05/15/06 - multi-section backups -- banand 04/27/06 - 17844_phase_2: spfile/change/resync changes -- molagapp 01/23/06 - backup transportable tablespace -- banand 04/20/06 - proj 17852 - log management -- molagapp 03/20/06 - bug-5106952 -- banand 12/27/05 - schema changes to track node specific info -- banand 03/27/06 - 17844_phase_1: database site awareness -- banand 12/27/05 - schema changes to track site specific info -- banand 02/09/06 - bug 4595644 -- molagapp 01/16/06 - bump up version to 11.1.0.0 -- banand 12/07/05 - bug-4545809 -- molagapp 11/07/05 - bug-4558970 -- banand 09/27/05 - bug 4637849 -- molagapp 10/03/05 - update versionList -- molagapp 09/02/05 - add ceilAsm to getSpaceRecl -- molagapp 08/17/05 - bug-4548861 -- banand 08/02/05 - bug 4524478 -- molagapp 08/03/05 - move common private procedure from pls -- molagapp 07/23/05 - move common public procedure from pls -- molagapp 07/23/05 - code cleanup -- molagapp 06/13/05 - bug 4430230 - add flbrp to setUntilScn -- banand 05/16/05 - encrypted backups -- molagapp 04/25/05 - skip storing bdf records for obsolete -- banand 04/21/05 - bug 4291935 -- molagapp 05/02/05 - bug-4332795 -- molagapp 04/26/05 - bug-4330520 -- banand 03/03/05 - bug 4214635 -- banand 04/07/05 - bug 4273012 -- molagapp 02/01/05 - bug-4146404: add getDbUniqueName -- molagapp 03/23/05 - bug 4180014 -- molagapp 02/10/05 - rewrite sql query for performance -- molagapp 02/12/05 - fix history ordering -- molagapp 02/01/05 - lrg 1807418 -- molagapp 02/06/05 - fix obsolete device type option (bug 4168850) -- molagapp 01/18/05 - bug-4110708 -- molagapp 12/20/04 - protect debug for procedures used in sql query -- molagapp 11/23/04 - re-fix bug# 3857039 - check for new backupset -- molagapp 11/23/04 - re-fix bug# 3857039 - fix overflow tag, device -- raguzman 11/22/04 - Define getRequiredSCN -- molagapp 11/01/04 - remove skipping keep backups while trimming -- molagapp 10/29/04 - bug-3964370 -- molagapp 09/24/04 - bug-3773849 -- ssamaran 08/11/04 - bug-3736736 -- molagapp 
09/17/04 - bug-3341831 -- molagapp 09/22/04 - bug 3857039 -- banand 09/14/04 - display two digits for size -- molagapp 09/02/04 - init lbFbUntilTime and lbMinGrsp -- banand 09/03/04 - lrg 1738356 -- molagapp 08/15/04 - bug-3520255 -- molagapp 08/23/04 - lrg 1732913 -- molagapp 08/13/04 - add rlgscn, rlgtime to setUntilScn -- molagapp 08/06/04 - bug-3755971 -- banand 08/04/04 - bug-3330647 -- molagapp 07/29/04 - bug-3741999 -- banand 07/21/04 - fix display time -- banand 07/13/04 - define functions to display column values -- banand 07/06/04 - bug 3718483 -- molagapp 06/29/04 - bug 3664004 -- molagapp 05/17/04 - interpret null incremental level as INCR backup -- banand 04/20/04 - enhanced RMAN job views -- molagapp 05/03/04 - add setUntilResetlogs -- molagapp 04/21/04 - tempfile re-creation project -- molagapp 04/15/04 - bug-3572747 -- molagapp 04/14/04 - lrg_1636108 -- molagapp 04/06/04 - order obsolete list by stamp -- rasivara 04/05/04 - bug 2391697: Add TranslateDatafileCancel -- banand 03/29/04 - bug 3386041 -- molagapp 03/23/04 - bug-3527769 -- jeffyu 11/04/03 - bug 3234433 -- molagapp 12/15/03 - bug 3310413 -- molagapp 01/09/04 - bug 3365344 -- banand 12/24/03 - bug 3339262 -- jeffyu 11/26/03 - lrg 1596996 -- molagapp 11/28/03 - allow imagecopy on sbt device for recover copy -- banand 11/11/03 - bug 2665255 -- banand 10/21/03 - if dbid known, show only those incarnations -- molagapp 10/24/03 - lrg# 1585247 -- sjeyakum 09/24/03 - bug 3141318 -- molagapp 10/17/03 - bug-3201216 -- sjeyakum 09/19/03 - bug 3085334 -- banand 10/08/03 - bug 3178592 -- molagapp 09/05/03 - lrg 1564671: close all open cursors -- sdizdar 08/30/03 - fix report obsolete if redunadncy is NONE -- fsanchez 05/26/03 - bug-2675757 -- banand 08/18/03 - bug 2998129 -- jeffyu 08/26/03 - lrg 1562424 -- sdizdar 08/20/03 - bug-3005920 -- jeffyu 07/21/03 - bug 2976535 -- jeffyu 07/07/03 - modifying computeUntilSCN() for bug 2995508 -- molagapp 07/08/03 - don't report obs als when del policy is applied -- molagapp 05/07/03 - bug 2917664 -- banand 07/11/03 - fix alBackupHistory_c -- molagapp 07/02/03 - uninitialize lbState collections during firstcall -- molagapp 06/12/03 - add setToLog -- sdizdar 06/06/03 - fix bs_status NULL -- banand 05/13/03 - multi-node RMAN configuration support -- molagapp 05/17/03 - use incremental backups with incr_scn=create_scn -- to restore and report obsolete -- sdizdar 05/18/03 - fix order in backup_files view -- molagapp 05/27/03 - lrg_fix_030520 -- molagapp 04/12/03 - use NOCOPY for record arguments -- molagapp 03/23/03 - use dbms_output buffer_size as null -- molagapp 03/16/03 - add cache mechanism for findValidBackupSet -- molagapp 03/12/03 - one cursor for openRecoveryAction -- sdizdar 05/01/03 - fix backup_files view -- molagapp 04/25/03 - bug-2841084: for recover of copy -- banand 01/28/03 - restore preview -- sdizdar 04/30/03 - fix listBackup cursor -- sdizdar 01/29/03 - prj 2090 (compress backup): -- add compressed in listBackup_c and rcvRec_t -- banand 04/03/03 - fix inc_record_c -- sdizdar 03/26/03 - bug-2867661 -- nsadaran 03/11/03 - bug 2828126 -- nsadaran 01/29/03 - datafilecopy duplicate handling -- banand 02/26/03 - bug 2707377 -- molagapp 03/02/03 - add craGetAllCfBackups -- molagapp 02/29/03 - add rc_listBackupPipe -- molagapp 02/25/03 - fix listBackup: enable redo from non-current inc -- sdizdar 02/25/03 - do not resync RMAN_STATUS -- sdizdar 02/12/03 - bug-2712286: fix computeUntilSCN -- banand 02/04/03 - bug 2759308 -- molagapp 02/04/03 - fix 10i package compatibility -- 
molagapp 02/07/03 - add identicals to translateDataFileCopy -- banand 02/03/03 - list compatibility fix -- molagapp 11/19/02 - add setRecoveryDestFile -- nsadaran 01/06/03 - accessing controlfilecopies by tag and key -- molagapp 12/03/02 - standby aging rule -- banand 12/09/02 - fix/doc ordering for incarnation in cursors -- - backup optimization for archivelogs accross inc -- fsanchez 11/12/02 - lrg_1108 -- banand 11/18/02 - canApplyAnyRedo to false for pre-10i RMAN -- banand 10/31/02 - compare resetlogs data when setting duplicate flag -- fsanchez 10/08/02 - multiple_file_copy_2 -- banand 10/30/02 - fix select from offr in openrecoveryaction cursor -- sdizdar 09/15/02 - add cfType_obj to rcvrec_t -- sdizdar 10/07/02 - version is 10.0.0 -- sdizdar 09/30/02 - fix maxDfNumber in listBackup() -- molagapp 08/06/02 - recovery area project -- banand 08/09/02 - Recovery thru resetlogs proj: -- - get prvtrmns changes for resetlogs changes to -- catalog version. -- molagapp 05/23/02 - add getRecoverCopyScn for applyincr2copies -- molagapp 09/22/02 - ignore inc for backupset optimization: bug 2588020 -- banand 04/02/02 - create datafile -- molagapp 12/21/01 - restore failover project -- fsanchez 07/27/02 - multiple_file_copy -- mjaeger 08/23/02 - bug 2458246: report/delete obsolete + SPFILE -- molagapp 07/31/02 - bug 2484250 -- sdizdar 07/26/02 - undo openRecoveryActionCursor() changes -- sdizdar 07/19/02 - init dfRecTab in listBackup() -- sdizdar 01/31/02 - OEM views (part of prj 5779): -- - Add listBackup(), listBackupPipe(), -- getParentIncarnation(), and getRetentionPolicy() -- - add listBackup_c cursor -- - modify openRecoveryActionCursor(), addAction() -- - fix findSpfileBackup_c cursor -- molagapp 06/16/02 - bug 2419164 -- molagapp 05/19/02 - bug 2336178 -- molagapp 05/17/02 - honor proxy tag in getArchivedLogBackup -- molagapp 04/16/02 - proxy archived log -- molagapp 02/14/02 - ignore debug exception -- molagapp 02/01/02 - cast null -- sdizdar 02/01/02 - bug-2209822: add auxName to cursors -- molagapp 01/24/02 - rework backup history cursors -- molagapp 01/23/02 - bug-2174697: add inCorebsRec_t framework -- molagapp 01/19/02 - listBackupsetFiles - obey allIncarnations flg -- molagapp 12/12/01 - bug 2146724 -- molagapp 11/29/01 - update package version 9.2.0 -- sdizdar 11/05/01 - fix findBackupsetFiles -- molagapp 10/19/01 - add cfSequence, cfDate to recovery record -- molagapp 10/17/01 - bug 1530744 -- molagapp 10/19/01 - bug 2067187 -- molagapp 10/06/01 - add translateAllBackupSet -- sdizdar 09/07/01 - SPFILE backup: -- - add getSpfileBackup(), listGetSpfileBackup(), -- listTranslateSpfileBackup(), findSpfileBackup_c -- molagapp 09/18/01 - bug-1999761 -- molagapp 07/23/01 - bug-1900314 -- fsanchez 05/23/01 - dbnewid -- sdizdar 06/15/01 - bug-1782808: -- - improve computerecoveryaction -- - fix resetAll and setTransClause -- molagapp 06/07/01 - bug-1712720: add setTransClause -- molagapp 05/06/01 - add cursors to fix backup history performance -- swerthei 04/16/01 - improve performance of getCheckpoint -- sdizdar 04/21/01 - bug-961713: fix computeUntilSCN -- swerthei 04/04/01 - use new corruption view -- molagapp 05/08/01 - fix query performance -- sdizdar 04/10/01 - bug-1717268: add flags to getBackupHistory -- swerthei 04/03/01 - add new flavor of getCheckpoint -- swerthei 03/12/01 - simplify lrtbs query -- fsanchez 01/25/01 - bug-1586048 -- sdizdar 11/10/00 - bug-1496982: 8.2.0 -> 9.0.0 -- sdizdar 11/25/00 - fixed findConfig_c -- sdizdar 11/09/00 - bug-1478539: add keep attributes to 
findBackupSet -- dbeusee 10/27/00 - bug-1469307 -- sdizdar 11/04/00 - bug-1479780: -- - findArchivedLogCopy doesn't return online logs -- sdizdar 10/23/00 - bug-1477008: -- - add controlfile to openRecoveryActionCursor -- sagrawal 10/24/00 - Fixing defaults for body and spec -- dbeusee 10/17/00 - bug-1462384 -- sdizdar 10/08/00 - bug-1398333: -- - translate don't use rc_backup_set but bs -- sdizdar 08/24/00 - trimRecoveryActions doesn't count "keep" backups -- - keep atributes added to right views -- dbeusee 09/25/00 - rman82_list_fixes_1 -- molagapp 08/23/00 - add autobackup arg to listTranslateCfileBackup -- molagapp 08/28/00 - fix 8.2 upgrade -- dbeusee 06/13/00 - rman82_maint_syntax_unification -- dbeusee 07/20/00 - rman82_debug_enhancements -- sdizdar 08/13/00 - Show all and misc improvement: -- - change RECOVERABLE/UNERCOREVABLE to LOGS/NOLOGS -- smuralid 08/08/00 - common sql fe changes -- molagapp 07/19/00 - restore optimization. -- sdizdar 06/29/00 - Configure auxfilename and exclude tablespace: -- - add tsNumber and inBackup in -- datafile translation -- banand 06/23/00 - set duplicate in alRec_t -- molagapp 06/08/00 - restartable backups -- molagapp 06/01/00 - backup optimization -- sdizdar 05/20/00 - RMAN Retention Policy (keep): -- - add keep attributes in translation procedures -- - modified setUntilTime -- - modified findArcLogBackup/Copy -- swerthei 06/29/00 - fix cursor variables -- mjstewar 07/24/00 - OMF: set newname ... to new -- banand 05/19/00 - use name and value in findConfig_c -- dbeusee 05/03/00 - rman82_cf_status_unification -- dbeusee 11/01/99 - status_mask -- molagapp 05/18/00 - block media recovery -- fsanchez 03/24/00 - instantiate_standby -- sdizdar 04/15/00 - RMAN configuration: add getConfig -- molagapp 04/27/00 - backup backupset: add getmaxcopyno -- banand 04/12/00 - add tag options to recover cmd -- dbeusee 04/03/00 - xcheck_autolocate -- swerthei 03/30/00 - xcheck_autolocate: return backup piece device type -- fsanchez 01/07/00 - bug-1040149 -- molagapp 02/16/00 - bug 1186598: fix compatibility with RMAN 8.1.5 -- executable -- dbeusee 10/20/99 - bug-1043144: fix findProxyCopy -- fsanchez 09/24/99 - fix findValidBackupSet_c cursor -- mluong 08/30/99 - Fix typo -- gpongrac 10/12/99 - remove validateBackupSetCursor_t -- gpongrac 09/09/99 - comments -- swerthei 08/05/99 - close cursors in resetAll -- mluong 08/30/99 - fix typo -- gpongrac 08/10/99 - fix translateArchivedLog -- gpongrac 07/12/99 - bug 927353: translateArchivedLogSCNRange needs to -- gpongrac 07/06/99 - change rcver to 8.1.6 -- gpongrac 06/16/99 - report obsolete support -- gpongrac 01/12/99 - 8.2 restructure -- gpongrac 04/08/99 - change REM to -- -- swerthei 12/28/98 - 787381 - consider proxies in getIncrementalSCN -- swerthei 11/26/98 - more debugging improvements -- swerthei 11/25/98 - enable debugging code from rman -- gpongrac 11/25/98 - fix debugging in getRecveryAction0 -- dbeusee 11/13/98 - misc_815: Handle LIST RECOVERABLE UNTIL TIME... -- dbeusee 11/11/98 - misc_815: fix LIST COPY LIKE... -- dbeusee 11/07/98 - misc_815: fix LIST COPY/BS FROM TIME/UNTIL TIME.. -- dbeusee 10/30/98 - misc_815: added stamp2date for LIST COPY -- dbeusee 10/19/98 - misc_815: fix LIST BACKUPSET FROM TIME... 
-- gpongrac 10/19/98 - misc_815: fix LIST with TAG -- dbeusee 10/17/98 - misc_815: fix LIST ALL device type filtering -- gpongrac 10/05/98 - misc_815: fix recovery -- gpongrac 09/10/98 - misc_815: fix computeRecoveryActions for list cmd -- swerthei 10/24/98 - add proxy support to REPORT OBSOLETE -- swerthei 10/20/98 - add listTranslateProxyDataFile, listGetProxyDataF -- fsanchez 09/04/98 - bug719092 -- swerthei 06/22/98 - prepare for wrapping to recover.bsq -- gpongrac 06/04/98 - fix comment -- swerthei 06/01/98 - add CHANGE PROXY -- swerthei 05/27/98 - proxy restore -- dbeusee 05/15/98 - misc_81_fixes_1 -- fsanchez 05/12/98 - duplex_backup_set -- dbeusee 04/21/98 - rpt_redundancy_enh -- gpongrac 03/04/98 - fix getPackageVersion -- dbeusee 04/06/98 - xcheck enh. -- dbeusee 03/29/98 - Update comment about restore only (not true any m -- fsanchez 03/25/98 - Duplexed backup sets -- dbeusee 03/01/98 - list enh. -- gpongrac 01/23/98 - upgrade catalog_version to 8.0.5 -- dbeusee 02/11/98 - Fix bug 613166. -- fsanchez 01/04/98 - Allow setDatabase to receive dbid without dbname -- gpongrac 12/23/97 - change findcontrolfilebackup -- gpongrac 10/30/97 - bug 560638: fix validatebackupset -- gpongrac 10/02/97 - allow <= in setuntiltime and dbq -- gpongrac 09/03/97 - have dfcopy name xlate return filesize -- tpystyne 09/11/97 - bug 480172, fix name translation -- swerthei 08/29/97 - add getdatafile.read_only -- swerthei 08/18/97 - add getdatafile.stop_change# -- gpongrac 07/02/97 - change to version 8.0.4 -- gpongrac 07/01/97 - avoid offr records with stamp of 0 -- mzhou 08/07/97 - fix order by ts# -- gpongrac 07/22/97 - chang lrtbs cursor to order by tsid so system ts -- swerthei 04/23/97 - fix list backupset -- gpongrac 04/22/97 - offline range can end at resetlogs scn -- dalpern 04/16/97 - renamed as prvtrmnu, v. 
prvtrmns -- gpongrac 04/11/97 - add cfscn to computeRecoveryActions -- gpongrac 04/10/97 - add rlgtime to getRecoveryAction -- gpongrac 04/10/97 - fix incarnation check at top of computeRecov...0 -- gpongrac 04/04/97 - add translateBackupPieceTag -- gpongrac 04/04/97 - deal with controlfiles that have no offline range -- gpongrac 04/01/97 - add offline range info to computeRecoveryActions -- swerthei 03/27/97 - add getDataFile.ts_name -- tpystyne 03/20/97 - update catalog version to 8.00.03 -- swerthei 03/21/97 - fix merge error -- swerthei 03/12/97 - get file sizes in blocks, for metrics -- gpongrac 03/17/97 - fix cursor to deal with inplicit offline ranges c -- gpongrac 03/15/97 - add to_time for implicit offline ranges in comput -- gpongrac 03/10/97 - remove df_ckpscn as arg to openRecoveryActionCurs -- gpongrac 03/10/97 - add setDebugOn and setDebugOff -- gpongrac 03/03/97 - verify df_rlgscn in computeRecoveryActions0 -- gpongrac 02/28/97 - check allocate device types in addAction -- gpongrac 02/27/97 - only do partial recovery if have current controlf -- gpongrac 02/26/97 - fix compile errors -- gpongrac 02/26/97 - fix syntax errors -- gpongrac 02/26/97 - add new recovery functions -- gpongrac 02/20/97 - require offr.online_time be strictly less than un -- gpongrac 02/20/97 - simplify handling of until time/scn in where clau -- swerthei 01/21/97 - change parameters for translatebackupsetkey -- swerthei 01/15/97 - add dumpState -- gpongrac 01/14/97 - add getCloneName -- tpystyne 12/17/96 - add listRollbackSegTableSpace -- swerthei 01/08/97 - change parameters for backup piece translation -- swerthei 01/06/97 - continue REPORT DELETABLE -- gpongrac 01/06/97 - add nxtscn to getArchivedLog -- swerthei 01/03/97 - add getParentIncarnation -- ### comments from 1996 removed --------------------------------------------- -- *** PACKAGE VARIABLES/TYPES SECTION *** -- --------------------------------------------- ---------------------- -- Global Constants -- ---------------------- MAXSCNVAL CONSTANT number := 9e125; -- guaranteed higher than any SCN -- CURRENT online log NEXT_CHANGE# value MAXSCNVAL_NEXT_CHANGE CONSTANT number := 281474976710655; MAXSEQVAL CONSTANT number := 2**32-1; MINDATEVAL CONSTANT date := to_date('01/01/1900','MM/DD/YYYY'); MAXDATEVAL CONSTANT date := to_date('12/31/9999','MM/DD/YYYY'); CONST2GVAL CONSTANT number := 2**31; CONST4GVAL CONSTANT number := 2**32; -- keep types (see definitions in krmi.h and rcv/if/kcc3.h) KEEP_NO CONSTANT number := 0; KEEP_LOGS CONSTANT number := 256; KEEP_NOLOGS CONSTANT number := 512; KEEP_CONSIST CONSTANT number := 1024; DEB_UNDEF CONSTANT number := 0; DEB_PRINT CONSTANT number := 0; DEB_ENTER CONSTANT number := 1; DEB_EXIT CONSTANT number := 2; DEB_IN CONSTANT number := 3; DEB_OPEN CONSTANT number := 4; DEB_DEF_PNAME CONSTANT varchar2(50) := 'prvtrmnu'; ---------------------- -- Global Variables -- ---------------------- this_db_key number := NULL; this_dbinc_key number := NULL; this_reset_scn number := NULL; this_reset_time date; this_db_unique_name varchar2(30) := NULL; -- used only to identify rows for this_site_key number := NULL; -- configuration and flashback tbl -- this_site_key will be NULL for -- 9i RMAN client this_dummy_instance boolean := FALSE; -- following two variables are used to control how the name translation for -- all cursors work. It is possible that translation_site_key and this_site_key -- do not belong to same site. -- If the RMAN client is site aware, then this package will behave as if -- connected to 11G RMAN. 
-- Otherwise, the client must be pre-11G. Hence the old behavior described
-- below is kept intact:
-- 1) datafile name translation returns filenames from primary...
--
translation_site_key NUMBER := NULL;        -- Never NULL
realf_site_key       number := NULL;        -- override df/tf/online log txln
user_site_key        number := NULL;        -- override log/backups/conf/rp txln
user_db_unique_name  varchar2(30) := NULL;  -- corresponds to user_site_key val
client_site_aware    number := 0;

-- By default the package assumes that disk backup files and tape backup
-- files are shared. The following 3 variables are used to override this
-- behavior. Note that from 11gR1 onwards (i.e. when client_site_aware
-- is 1), it is assumed that disk backups are not shared but tape backups
-- are shared; hence these variables are changed in the setDatabase call.
-- The backup files are backup pieces, image copies or proxy copies.
logs_shared         number := 0;  -- used only when client_site_aware is 1
disk_backups_shared number := 1;  -- indicates shared across all sites
tape_backups_shared number := 1;  -- indicates shared across all sites

-- The following is the translation behavior for all cursors, unless noted
-- otherwise (see the predicate sketch after the Filtering Flags section
-- below).
-- For BS rows filtering:
--    we have no way to know where a backup set exists, hence we filter
--    backup sets only if all backup sets are not shared. Otherwise we
--    return all backup sets; findValidBackupSet will filter out the
--    backup sets that are not accessible at a given site anyway.
--    If the user is interested in the backups of one specific site, just
--    return those. Rows with a null site_key are always returned.
-- For BP, AL, CDF, CCF, XDF, XAL, XCF, site_key indicates which site owns
--    the file, hence it is used to filter out the files that are not
--    accessible as per the file sharing attributes.
-- When the FOR DB_UNIQUE_NAME option is used, user_site_key or
--    realf_site_key is set. In that case all file sharing attributes are
--    overridden and the translation returns only the objects owned by the
--    user-specified site. Not even rows with a null site_key are returned
--    in this case, because those objects are selected anyway without using
--    the FOR DB_UNIQUE_NAME option.

TYPE incarnation_set_c IS TABLE OF rc_database_incarnation%ROWTYPE
   index by binary_integer;
inc_list    incarnation_set_c;  -- 0th one is the current incarnation,
                                -- see setDatabase
max_inc_idx binary_integer;
currInc     binary_integer;     -- temp variable used to keep track of
                                -- incarnation

type pnames is table of varchar2(50) index by binary_integer;
pname_i     number := 0;
last_pnames pnames;
debug       boolean := FALSE;

--------------------
-- rcvRec_t Stack --
--------------------

TYPE rcvRecTab_t IS TABLE OF rcvRec_t;     -- recovery record stack type
rcvRecStack rcvRecTab_t := rcvRecTab_t();  -- recovery record stack

-----------------------
-- getPackageVersion --
-----------------------

TYPE versionList_t IS TABLE OF varchar2(11) INDEX BY binary_integer;
versionList     versionList_t;
versionMaxIndex binary_integer;
versionCounter  binary_integer;

-----------------
-- setDatabase --
-----------------

catalogVersion CONSTANT VARCHAR2(11) := '11.02.00.03';

-- For getParentIncarnation
getParentIncarnationKey number;

---------------------
-- Filtering Flags --
---------------------

allIncarnations   number;  -- allow records from non-current
                           -- incarnation
ignoreCreationSCN number;  -- a stupid flag that is here
                           -- only to provide behaviour that is
                           -- backwards compatible with a kludge
                           -- dbeusee put in to compensate for
                           -- one of his bugs.
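-- For illustration only (a sketch, not code used by RMAN): the site filter
-- that the translation cursors below apply has this general shape, where
-- "x" stands for one of bp, al, cdf, ccf, xdf, xal, xcf and "shared" for
-- the sharing flag of that file class:
--
--      (user_site_key = x.site_key)        -- FOR DB_UNIQUE_NAME given
--   OR (user_site_key IS NULL              -- else use sharing attributes
--       AND (shared = TRUE#
--            OR this_site_key = nvl(x.site_key, this_site_key)))
--
-- so a row with a NULL site_key always qualifies unless the user restricted
-- the translation to one specific site.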
-- Filters to ignore the is_standby flag within the specified range.
lbacked_al_next_scn        NUMBER;
standby_became_primary_scn NUMBER;

-- Cache for archivelog filenames returned for the same log sequence, used
-- to filter duplicate names.
TYPE lognames_set_c IS TABLE OF al.fname%TYPE index by binary_integer;
lognames_list    lognames_set_c;  -- All the log names returned for same logseq
max_lognames_idx binary_integer;

-- RMAN cannot yet apply redo from a non-current incarnation for pre-10i
-- versions. For 10i versions canApplyAnyRedo is TRUE.
-- canApplyAnyRedo means the server can apply any redo known to the
-- incarnation records. So make sure from_scn is greater than the last
-- known incarnation.
canApplyAnyRedo number := FALSE#;

-- This flag is used by the computeRecoveryActions cursor. If set to TRUE,
-- then computeRecoveryActions will return all controlfile backups,
-- including ones from previous incarnations. We need this flag only
-- because we want to be compatible with 9i RMAN using a 10i catalog.
craGetAllCfBackups number := FALSE#;

-- RMAN can convert standby to backup and backup to standby controlfiles
-- from 11.0 onwards. This flag controls the behavior of the cursors that
-- select the appropriate controlfile backups.
canConvert_Cf number := FALSE#;

redoRec       rcvRec_t;
untilSCN      number;
untilTime     date;
rpoint_set    boolean;
restoreSource number;
restoreTag    bp.tag%TYPE;
onlyStandby   number;

-------------------
-- setDeviceType --
-------------------

TYPE deviceList_t IS TABLE OF rc_backup_piece.device_type%TYPE
     INDEX BY binary_integer;
deviceList  deviceList_t;
deviceCount number;
diskDevice  boolean;
anyDevice   number;

-------------------------
-- setRecoveryDestFile --
-------------------------

recoveryDestFile boolean;

-----------------------------
-- RedoLog Deletion Policy --
-----------------------------
-- Used by listBackup - it cannot be set in the recovery catalog package
-- because this policy is used solely by the recovery area algorithm. It is
-- used only by the system package listBackup function in order to prevent
-- reporting obsolete archivelogs that are not APPLIED when the deletion
-- policy is APPLIED.
--
redoLogDeletionPolicyType varchar2(512) := 'TO NONE';

-------------------------
-- extendKeepSCN Masks --
-------------------------

extendFullSCN CONSTANT BINARY_INTEGER := 2**0;
extendIncrSCN CONSTANT BINARY_INTEGER := 2**1;
extendLogSCN  CONSTANT BINARY_INTEGER := 2**2;
extendAllSCN  CONSTANT BINARY_INTEGER :=
   extendFullSCN + extendIncrSCN + extendLogSCN;

-------------------------------
-- In Memory BackupSet Table --
-------------------------------
--
-- A PL/SQL memory table is constructed as a cache to quickly validate a
-- backupset. The cache is divided into 2 sections:
--   a) backupset records that are sources of the same set of files
--   b) backupset records that are nearest to the locality of reference
-- Let's call them the REDUNDANT and LOCALITY sections, respectively.
--
-- findValidBackupSet does nothing if there is a hit in cache memory for
-- a given bskey. Otherwise, it re-loads both sections.
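-- A sketch of the intended flow (illustrative pseudo-code only; cacheHit is
-- an assumed helper name, and the real logic lives in findValidBackupSet):
--
--   IF cacheHit(cacheBsRecTable, bskey) THEN
--      NULL;                     -- validate using the cached copies
--   ELSE
--      reload REDUNDANT section;
--      reload LOCALITY section;  -- both sections, as described above
--   END IF;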
--
-- Cache Hints used during loading
noHint        constant binary_integer := 0;
redundantHint constant binary_integer := 1;
localityHint  constant binary_integer := 2;

TYPE cacheBsRecRow_t IS RECORD
(
  deviceindx binary_integer,  -- index into cacheBsRecTable.devicelist
  tag        varchar2(32),    -- may be null
  copyNumber binary_integer,  -- null if code 2 or 3
  code       binary_integer   -- 1 => same copy#
                              -- 2 => mix of copy#s, but same tag
                              -- 3 => mix of copy#s and tags
);

TYPE cacheBsRecIndex_t IS TABLE OF cacheBsRecRow_t INDEX BY BINARY_INTEGER;

TYPE cacheBsRecHash_t IS RECORD
(
  bskey   number,             -- backupset key
  mixcopy boolean := FALSE,   -- TRUE if mixcopy can make set usable
  copy    cacheBsRecIndex_t   -- list of copies
);

TYPE cacheBsRecHashList_t IS TABLE OF cacheBsRecHash_t
     INDEX BY BINARY_INTEGER;

TYPE cacheBsRecBsKey_t IS RECORD
(
  bsindex binary_integer := 1,  -- index into bslist
  bslist  cacheBsRecHashList_t  -- list of backupset keys in hash table
);

TYPE cacheBsRec_t IS TABLE OF cacheBsRecBsKey_t INDEX BY BINARY_INTEGER;

TYPE cacheBsRecTable_t IS RECORD
(
  initlimit   boolean := FALSE,              -- is limit initialized?
  limit       number  := bsRecCacheLowLimit, -- cache size
  chit        number  := 0,                  -- number of hits in cache
  mixcopy     boolean := FALSE,              -- does cache have mix of copyno?
  minbskey    number  := 0,                  -- minimum valid bskey
  hint        binary_integer := noHint,      -- access pattern hint
                                             -- 0 => default (red + locality)
                                             -- 1 => redundant
                                             --      (+ bck_type locality)
                                             -- 2 => locality
  -- cache qualifiers
  devicetype  rc_backup_piece.device_type%TYPE,
  mask        binary_integer,
  tag         rc_backup_piece.tag%TYPE,
  devicelist  deviceList_t,                  -- list of devices
  devicecount binary_integer := 0,

  -- Cache Set (Locality and Redundant)
  -- ==================================
  -- The Locality cache contains records that are nearest to the current
  -- reference, whereas the Redundant cache contains records that serve as
  -- backups for the source files of the current reference.
  -- For instance, if the current reference is the backupset with bskey=100,
  -- then the locality cache contains records that are around bskey 100
  -- (say 1 to 1k). On the other hand, if bskey=100 is the backup for
  -- datafile sources (1, 2, 3), then the Redundant cache contains records
  -- that serve as backups for source files (1, 2, 3). It is called
  -- redundant because the cache contains redundant backups.
  -- The Locality cache is useful to fetch the latest backup, whereas the
  -- Redundant cache is useful for reports and lists.
  --
  -- The Redundant cache is followed by the Locality cache.
  --
  -- For instance,
  --    cacheBsRecTable.bsRec(i).bslist(1..n).copy(1..n)
  -- is the way to access the locality cache, where i is mod(bskey, 2**32),
  -- converted to a negative value if >= 2**31; bslist is the list of
  -- backupset keys that hashed into the same slot, and copy 1..n are the
  -- rows.
  --
  bsRec       cacheBsRec_t,

  -- Hit list is a list of backupset records that were recently accessed.
  -- While reloading the cache, the hit list is always kept in the cache.
  -- It is 25% of the cache size.
  hitindex    binary_integer := 1,           -- index into hit list
  hitlist     numTab_t

  -- Cache structure
  -- 1. When hint = noHint
  --    | 50% Redundant records | 25% Hit list | 25% Locality records |
  --
  -- 2. When hint = redundantHint
  --    | 100% Redundant records |
  --    If the cache is not full, then it is filled up with 25% Hit list
  --    and 25% Locality records of the same backup_type.
  --
  -- 3. When hint = localityHint
  --    | 100% Locality records |
  --
);
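-- Worked example of the index arithmetic above (illustrative values only):
-- a backupset with bskey = 3000000100 gives mod(3000000100, 2**32) =
-- 3000000100, which is >= 2**31 (2147483648), so it is stored as
-- 3000000100 - 2**32 = -1294967196. This keeps the index within the
-- signed 32-bit range of a PL/SQL BINARY_INTEGER.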
cacheBsRecTable cacheBsRecTable_t;

TYPE cacheRequest_t IS RECORD
(
  bskey number,
  icopy binary_integer
);

findValidCacheRequest cacheRequest_t;

----------------------------
-- computeRecoveryActions --
----------------------------

TYPE rcvRecStackState_t IS RECORD
(
  lowAction   number,  -- action with lowest from_scn on
                       -- rcvRecStack that is not a fullKind.
                       -- Usually points to top of stack, but
                       -- if computeRA_allRecords is TRUE or
                       -- computeRA_fullBackups > 1,
                       -- then may point down into stack.
  savePoint   number,  -- most recently added full_act_t
  fullBackups number,  -- number of full_act_t
  top         number   -- top of stack at start of a recursive
                       -- test search of a possible parent
                       -- incarnation
);

rcvRecStackState rcvRecStackState_t;

--
-- We will start recording actions only when rcvRecBackupAge = thisBackupAge.
--
computeRA_allRecords    number;   -- do not stop at first full backup and
                                  -- return all incremental backups
computeRA_fullBackups   number;   -- stop when this many full backups
                                  -- have been reached.
computeRA_restorable    boolean;  -- cannot recover the datafile we've
                                  -- requested to recover, but there
                                  -- is a backup that we could restore
                                  -- and recover.
computeRA_available     boolean;  -- there is a backup available on
                                  -- some non-allocated device type
computeRA_availableMask binary_integer;
computeRA_rcvCopy_avail boolean;  -- there is a backup available on
                                  -- some non-allocated device type
                                  -- during recover copy command
                                  -- compilation

-- computeRecoveryAction/addAction/addRedo return code values
action_OK                number := 0;
action_FAIL              number := 1;
action_SKIP              number := 2;
action_OLD_REDO          number := 3;
action_WRONG_INCARNATION number := 4;
action_OLD_INC_REDO      number := 5;

old_redo exception;  -- redo from old incarnation
pragma exception_init(old_redo, -20501);

-----------------------
-- getRecoveryAction --
-----------------------

getRA_containerMask   number;
getRA_actionMask      number;
getRA_likePattern     cdf.fname%TYPE;
getRA_completedAfter  date;
getRA_completedBefore date;

----------------------
-- Recovery Record  --
----------------------
-- Both of the variables below play a role during backup source selection.
-- Assume there are 1, 2, ... N valid backups and N is the last backup.
--    Setting rcvRecBackupAge=0 will select the last backup, i.e. N;
--    rcvRecBackupAge=1 will select the N-1 backup;
--    rcvRecBackupAge=x will select the N-x backup.
-- computeRecoveryAction, findArchivelogBackup, getControlfileBackup and
-- getSpfileBackup are the backup source procedures where these
-- values are interpreted.
-- setRcvRecBackupAge and resetthisBackupAge are the procedures that
-- set these values. Their default value is 0 so that the latest
-- backup is always chosen.
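-- Example (illustrative): with five valid backups B1..B5, B5 most recent:
--    rcvRecBackupAge = 0  ->  B5 (the default)
--    rcvRecBackupAge = 1  ->  B4
--    rcvRecBackupAge = 3  ->  B2
-- (with age >= 5 here there is no matching backup left to select).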
--
rcvRecBackupAge number;  -- requested age of backup
thisBackupAge   number;  -- current age of backup
getBS_status    number;  -- status of current backup

------------------------
-- Translation Clause --
------------------------

tc_thread   number;
tc_fromTime date;
tc_toTime   date;
tc_fromSCN  number;
tc_toSCN    number;
tc_fromSeq  number;
tc_toSeq    number;
tc_pattern  varchar2(512);

TYPE fileTab_t IS TABLE of boolean index by binary_integer;
tc_fno      fileTab_t;
tc_database number;

TYPE sequenceTab_t IS TABLE of boolean index by binary_integer;
TYPE threadseqTab_t IS TABLE of sequenceTab_t index by binary_integer;
tc_threadSeq threadSeqTab_t;

TYPE dbidTab_t IS TABLE OF boolean index by binary_integer;
tc_dbid    dbidTab_t;
tc_anydbid number;

--------------------------
-- Transport tablespace --
--------------------------
-- Pre-11g RMAN clients cannot support transportable tablespaces.
-- For 11g RMAN clients canHandleTransportableTbs is TRUE.
canHandleTransportableTbs number := FALSE#;

------------------------------------------------------------------------------
-- List Backup Cursor                                                       --
-- NOTE!! NOTE!! NOTE!!                                                     --
-- If you change this cursor you also have to change the procedure          --
-- openLbCursor(). The reason for having two versions of the cursor is     --
-- that the OCI client has problems maintaining the cursor reference       --
-- variable.                                                                --
------------------------------------------------------------------------------

lb_NeedObsoleteData number := TRUE#;

CURSOR listBackup_c RETURN lbRec_t IS
  SELECT -- Backup Sets
         bs.bs_key list_order1, 0 list_order2, bs.bs_key pkey,
         backupset_txt backup_type, backupset_txt file_type,
         decode(bs.keep_options, 0, 'NO', 'YES') keep,
         bs.keep_until keep_until,
         decode(bs.keep_options, 256, 'LOGS', 512, 'NOLOGS',
                1024, 'BACKUP_LOGS', null) keep_options,
         null status, null fname, null tag, null media,
         bs.bs_recid recid, bs.bs_stamp stamp, null device_type,
         0 block_size, bs.completion_time completion_time, 'NO' is_rdf,
         null compressed, null obsolete, null keep_for_dbpitr, null bytes,
         bs.bs_key bs_key, bs.set_count bs_count, bs.set_stamp bs_stamp,
         decode(bs.bck_type, 'L', archivedlog_txt, datafile_txt) bs_type,
         decode(bs.incr_level, 0, full_txt, 1, incr1_txt, 2, incr2_txt,
                3, incr3_txt, 4, incr4_txt,
                decode(bs.bck_type, 'I', incr_txt, full_txt)) bs_incr_type,
         bs.pieces bs_pieces, null bs_copies,
         bs.completion_time bs_completion_time, null bs_status,
         null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type,
         null bp_piece#, null bp_copy#, null df_file#, null df_tablespace,
         null df_resetlogs_change#, null df_creation_change#,
         null df_checkpoint_change#, null df_ckp_mod_time,
         null df_incremental_change#, null rl_thread#, null rl_sequence#,
         null rl_resetlogs_change#, null rl_first_change#, null rl_first_time,
         null rl_next_change#, null rl_next_time, null sf_db_unique_name
  FROM bs
  WHERE bs.db_key = this_db_key
    AND (bs.site_key IS NULL OR          -- always return null site_key
         user_site_key = bs.site_key OR  -- user interested in one site
         (user_site_key IS NULL AND      -- return rows per access attr
          (disk_backups_shared = TRUE# OR
           tape_backups_shared = TRUE# OR
           this_site_key = bs.site_key)))
  UNION ALL
  SELECT -- Backup Pieces
         bp.bs_key list_order1, 1 list_order2, bp.bp_key pkey,
         backupset_txt backup_type, piece_txt file_type,
         null keep, null keep_until, null keep_options,
         decode(bp.status, 'A', available_txt, 'U', unavailable_txt,
                'X', expired_txt, other_txt) status,
         bp.handle fname, bp.tag tag, bp.media media,
         bp.bp_recid recid, bp.bp_stamp stamp, bp.device_type device_type,
         0 block_size,
bp.completion_time completion_time, bp.is_recovery_dest_file is_rdf, bp.compressed compressed, null obsolete, null keep_for_dbpitr, bp.bytes bytes, bp.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, bp.piece# bp_piece#, bp.copy# bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bp WHERE bp.db_key = this_db_key AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) UNION ALL SELECT -- Backup Datafile bdf.bs_key list_order1, 2 list_order2, bdf.bdf_key pkey, backupset_txt backup_type, datafile_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bdf.bdf_recid recid, bdf.bdf_stamp stamp, null device_type, bdf.block_size block_size, bdf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bdf.block_size * bdf.blocks bytes, bdf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, decode(bdf.incr_level, 0, full_txt, 1, incr1_txt, 2, incr2_txt, 3, incr3_txt, 4, incr4_txt, decode(greatest(bdf.create_scn, bdf.incr_scn), bdf.create_scn, full_txt, incr_txt)) bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, bdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, bdf.create_scn df_creation_change#, bdf.ckp_scn df_checkpoint_change#, bdf.ckp_time df_ckp_mod_time, bdf.incr_scn df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = bdf.dbinc_key UNION ALL SELECT -- Backup Controlfile bcf.bs_key list_order1, 2 list_order2, bcf.bcf_key pkey, backupset_txt backup_type, controlfile_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bcf.bcf_recid recid, bcf.bcf_stamp stamp, null device_type, bcf.block_size block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bcf.block_size * bcf.blocks bytes, bcf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, full_txt bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, bcf.ckp_scn df_checkpoint_change#, bcf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bcf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = bcf.dbinc_key UNION ALL 
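  -- (Illustrative note) Every UNION ALL branch of listBackup_c fills the
  -- same lbRec_t column template and returns NULL for columns that do not
  -- apply to its row type. list_order1 carries the bs_key (or the copy's
  -- own key) and list_order2 ranks the row type: 0 for the backup set,
  -- 1 for its pieces, 2 for the file records inside it, -1 for standalone
  -- copies; the final ORDER BY on these columns keeps all rows of one
  -- backup set together.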
SELECT -- Backup Redo Log brl.bs_key list_order1, 2 list_order2, brl.brl_key pkey, backupset_txt backup_type, archivedlog_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, brl.brl_recid recid, brl.brl_stamp stamp, null device_type, brl.block_size block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, brl.block_size * brl.blocks bytes, brl.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, brl.thread# rl_thread#, brl.sequence# rl_sequence#, dbinc.reset_scn rl_resetlogs_change#, brl.low_scn rl_first_change#, brl.low_time rl_first_time, brl.next_scn rl_next_change#, brl.next_time rl_next_time, null sf_db_unique_name FROM brl, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = brl.dbinc_key UNION ALL SELECT -- Backup spfile bsf.bs_key list_order1, 2 list_order2, bsf.bsf_key pkey, backupset_txt backup_type, spfile_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bsf.bsf_recid recid, bsf.bsf_stamp stamp, null device_type, 0 block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bsf.bytes bytes, bsf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, full_txt bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, 0 df_resetlogs_change#, 0 df_creation_change#, 0 df_checkpoint_change#, bsf.modification_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, db_unique_name sf_db_unique_name FROM bsf WHERE bsf.db_key = this_db_key UNION ALL SELECT -- Datafile Copy cdf.cdf_key list_order1, -1 list_order2, cdf.cdf_key pkey, copy_txt backup_type, datafile_txt file_type, decode(cdf.keep_options, 0, 'NO', 'YES') keep, cdf.keep_until keep_until, decode(cdf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(cdf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, cdf.fname fname, cdf.tag tag, null media, cdf.cdf_recid recid, cdf.cdf_stamp stamp, 'DISK' device_type, cdf.block_size block_size, cdf.completion_time completion_time, cdf.is_recovery_dest_file is_rdf, null compressed, null obsolete, null keep_for_dbpitr, cdf.block_size * cdf.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, cdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, cdf.create_scn df_creation_change#, cdf.ckp_scn df_checkpoint_change#, cdf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null 
sf_db_unique_name FROM cdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = cdf.dbinc_key AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL SELECT -- Controlfile Copy ccf.ccf_key list_order1, -1 list_order2, ccf.ccf_key pkey, copy_txt backup_type, controlfile_txt file_type, decode(ccf.keep_options, 0, 'NO', 'YES') keep, ccf.keep_until keep_until, decode(ccf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(ccf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, ccf.fname fname, ccf.tag tag, null media, ccf.ccf_recid recid, ccf.ccf_stamp stamp, 'DISK' device_type, ccf.block_size block_size, ccf.completion_time completion_time, ccf.is_recovery_dest_file is_rdf, null compressed, null obsolete, null keep_for_dbpitr, null bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, ccf.ckp_scn df_checkpoint_change#, ccf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM ccf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = ccf.dbinc_key AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))))) UNION ALL SELECT -- Archived Redo Log al.al_key list_order1, -1 list_order2, al.al_key pkey, copy_txt backup_type, archivedlog_txt file_type, null keep, null keep_until, null keep_options, decode(al.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, al.fname fname, null tag, null media, al.al_recid recid, al.al_stamp stamp, 'DISK' device_type, al.block_size block_size, al.completion_time completion_time, al.is_recovery_dest_file is_rdf, al.compressed compressed, null obsolete, null keep_for_dbpitr, al.block_size * al.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, al.thread# rl_thread#, al.sequence# rl_sequence#, dbinc.reset_scn rl_resetlogs_change#, al.low_scn rl_first_change#, al.low_time rl_first_time, al.next_scn rl_next_change#, al.next_time rl_next_time, null sf_db_unique_name FROM dbinc, al LEFT OUTER JOIN grsp ON al.next_scn >= grsp.from_scn AND al.low_scn <= (grsp.to_scn + 1) AND al.dbinc_key = grsp.dbinc_key AND grsp.from_scn <= grsp.to_scn -- filter clean grp AND grsp.from_scn != 0 AND grsp.guaranteed = 'YES' WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = al.dbinc_key AND al.archived = 'Y' AND (lb_NeedObsoleteData = TRUE#) AND grsp.from_scn is null AND ((client_site_aware = TRUE# AND ((user_site_key = al.site_key) OR (user_site_key IS NULL AND (logs_shared = TRUE# OR this_site_key = nvl(al.site_key, 
this_site_key))))) OR (client_site_aware = FALSE#)) UNION ALL SELECT -- Datafile Proxy Copy xdf.xdf_key list_order1, -1 list_order2, xdf.xdf_key pkey, proxycopy_txt backup_type, datafile_txt file_type, decode(xdf.keep_options, 0, 'NO', 'YES') keep, xdf.keep_until keep_until, decode(xdf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xdf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, xdf.handle fname, xdf.tag tag, xdf.media media, xdf.xdf_recid recid, xdf.xdf_stamp stamp, xdf.device_type device_type, xdf.block_size block_size, xdf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, xdf.block_size * xdf.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, xdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, xdf.create_scn df_creation_change#, xdf.ckp_scn df_checkpoint_change#, xdf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM xdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = xdf.dbinc_key AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) UNION ALL SELECT -- Controlfile Proxy Copy xcf.xcf_key list_order1, -1 list_order2, xcf.xcf_key pkey, proxycopy_txt backup_type, controlfile_txt file_type, decode(xcf.keep_options, 0, 'NO', 'YES') keep, xcf.keep_until keep_until, decode(xcf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xcf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, xcf.handle fname, xcf.tag tag, xcf.media media, xcf.xcf_recid recid, xcf.xcf_stamp stamp, xcf.device_type device_type, xcf.block_size block_size, xcf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, null bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, xcf.ckp_scn df_checkpoint_change#, xcf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM xcf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = xcf.dbinc_key AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))))) UNION ALL SELECT -- Archivelog Proxy Copy xal.xal_key list_order1, -1 list_order2, xal.xal_key pkey, proxycopy_txt backup_type, archivedlog_txt file_type, decode(xal.keep_options, 0, 'NO', 'YES') keep, xal.keep_until keep_until, decode(xal.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xal.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, 
         other_txt) status,
         xal.handle fname, xal.tag tag, xal.media media,
         xal.xal_recid recid, xal.xal_stamp stamp,
         xal.device_type device_type, xal.block_size block_size,
         xal.completion_time completion_time, 'NO' is_rdf, null compressed,
         null obsolete, null keep_for_dbpitr,
         xal.block_size * xal.blocks bytes,
         null bs_key, null bs_count, null bs_stamp, null bs_type,
         null bs_incr_type, null bs_pieces, null bs_copies,
         null bs_completion_time, null bs_status, null bs_bytes,
         null bs_compressed, null bs_tag, null bs_device_type,
         null bp_piece#, null bp_copy#, null df_file#, null df_tablespace,
         dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#,
         null df_checkpoint_change#, null df_ckp_mod_time,
         null df_incremental_change#,
         xal.thread# rl_thread#, xal.sequence# rl_sequence#,
         dbinc.reset_scn rl_resetlogs_change#, xal.low_scn rl_first_change#,
         xal.low_time rl_first_time, xal.next_scn rl_next_change#,
         xal.next_time rl_next_time, null sf_db_unique_name
  FROM xal, dbinc
  WHERE dbinc.db_key = this_db_key
    AND dbinc.dbinc_key = xal.dbinc_key
    AND ((user_site_key = xal.site_key) OR
         (user_site_key IS NULL AND
          ((tape_backups_shared = TRUE#) OR
           (this_site_key = nvl(xal.site_key, this_site_key)))))
  -- We order by list_order so that objects from the same backupset
  -- (backup datafile, backupset, and piece records) come together.
  ORDER BY list_order1, list_order2, bp_piece#;

------------------------
-- Controlfile Backup --
------------------------
-- NOTE - Bug 4214635
-- The needstby flag is ignored for cf log translation for a few commands,
-- but it is used in catalog mode by the RMAN executable to filter out
-- archived logs based on the role of the database when the logs were
-- created.
CURSOR findControlfileBackup_c(
      sourcemask         IN number
     ,currentIncarnation IN number   DEFAULT TRUE#
     ,tag                IN varchar2 DEFAULT NULL
     ,pattern            IN varchar2 DEFAULT NULL
     ,completedAfter     IN date     DEFAULT NULL
     ,completedBefore    IN date     DEFAULT NULL
     ,untilSCN           IN number   DEFAULT NULL
     ,statusMask         IN binary_integer DEFAULT BSavailable
            -- not used for backupset type
     ,needstby           IN number   DEFAULT NULL
     ,typemask           IN binary_integer DEFAULT BScfile_all
            -- used only for backupset type
     ) RETURN rcvRec_t IS
  SELECT imageCopy_con_t type_con,
         ccf_key key_con, ccf_recid recid_con, ccf_stamp stamp_con,
         to_number(null) setStamp_con, to_number(null) setCount_con,
         to_number(null) bsRecid_con, to_number(null) bsStamp_con,
         to_number(null) bsKey_con, to_number(null) bsLevel_con,
         to_char(null) bsType_con, to_number(null) elapseSecs_con,
         to_number(null) pieceCount_con, fname fileName_con, tag tag_con,
         to_number(null) copyNumber_con, status status_con,
         to_number(null) blocks_con,  -- ccf doesn't have blocks
         block_size blockSize_con, 'DISK' deviceType_con,
         completion_time compTime_con, create_time cfCreationTime_con,
         to_number(null) pieceNumber_con, to_date(null) bpCompTime_con,
         to_char(null) bpCompressed_con, to_char(null) multi_section_con,
         full_act_t type_act, 0 fromSCN_act,
         ccf.ckp_scn toSCN_act, ccf.ckp_time toTime_act,
         dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act,
         ccf.dbinc_key dbincKey_act, to_number(null) level_act,
         0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj,
         to_number(null) cfSequence_obj, to_date(null) cfDate_obj,
         to_number(null) logSequence_obj, to_number(null) logThread_obj,
         to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj,
         to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj,
         to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj,
         to_char(null) logTerminal_obj,
         nvl(controlfile_type, 'B') cfType_obj,
         ccf.keep_options
keep_options, ccf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM ccf, dbinc WHERE dbinc.db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = ccf.dbinc_key AND (findControlfileBackup_c.currentIncarnation = FALSE# OR this_dbinc_key = ccf.dbinc_key) AND (findControlfileBackup_c.tag is NULL OR findControlfileBackup_c.tag = tag) AND (findControlfileBackup_c.pattern is NULL OR fname LIKE replace(replace(findControlfileBackup_c.pattern, '*','**'), '_', '*_') ESCAPE '*') AND (findControlfileBackup_c.completedAfter is NULL OR completion_time >= findControlfileBackup_c.completedAfter) AND (findControlfileBackup_c.completedBefore is NULL OR completion_time <= findControlfileBackup_c.completedBefore) AND (findControlfileBackup_c.untilSCN is NULL OR ccf.ckp_scn <= findControlfileBackup_c.untilSCN) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND (needstby is NULL OR nvl(controlfile_type,'B') = decode(needstby, TRUE#, 'S', 'B') OR canConvert_Cf = TRUE#) AND (sourcemask is NULL OR bitand(sourcemask, imageCopy_con_t) != 0) AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))))) UNION ALL SELECT proxyCopy_con_t type_con, xcf_key key_con, xcf_recid recid_con, xcf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, to_number(null) blocks_con, -- xcf doesn't have blocks block_size blockSize_con, device_type deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xcf.ckp_scn toSCN_act, xcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, xcf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, xcf.keep_options keep_options, xcf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xcf, dbinc WHERE 
db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = xcf.dbinc_key AND (findControlfileBackup_c.currentIncarnation = FALSE# OR this_dbinc_key = xcf.dbinc_key) AND (findControlfileBackup_c.tag is NULL OR findControlfileBackup_c.tag = tag) AND (findControlfileBackup_c.pattern is NULL OR handle LIKE replace(replace(findControlfileBackup_c.pattern, '*','**'), '_', '*_') ESCAPE '*') AND (findControlfileBackup_c.completedAfter is NULL OR completion_time >= findControlfileBackup_c.completedAfter) AND (findControlfileBackup_c.completedBefore is NULL OR completion_time <= findControlfileBackup_c.completedBefore) AND (findControlfileBackup_c.untilSCN is NULL OR xcf.ckp_scn <= findControlfileBackup_c.untilSCN) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND (needstby is NULL OR nvl(controlfile_type,'B') = decode(needstby, TRUE#, 'S', 'B') OR canConvert_Cf = TRUE#) AND (sourcemask is NULL OR bitand(sourcemask, proxyCopy_con_t) != 0) AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))))) UNION ALL SELECT backupSet_con_t type_con, bcf_key key_con, bcf_recid recid_con, bcf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, bcf.blocks blocks_con, bcf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, bcf.create_time cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, 0 fromSCN_act, bcf.ckp_scn toSCN_act, bcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, bcf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, bcf.autobackup_sequence cfSequence_obj, bcf.autobackup_date cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bs, bcf, dbinc WHERE dbinc.db_key = this_db_key -- belongs to this database AND bs.db_key = this_db_key -- belongs to this database AND bcf.dbinc_key = dbinc.dbinc_key -- join bcf and dbinc AND bcf.bs_key = bs.bs_key -- join bcf and bs AND bs.bck_type != 'L' -- ignore archivelog backups AND (findControlfileBackup_c.currentIncarnation = FALSE# OR this_dbinc_key = bcf.dbinc_key) AND (findControlfileBackup_c.completedAfter is NULL OR bs.completion_time >= 
findControlfileBackup_c.completedAfter) AND (findControlfileBackup_c.completedBefore is NULL OR bs.completion_time <= findControlfileBackup_c.completedBefore) AND (findControlfileBackup_c.untilSCN is NULL OR bcf.ckp_scn <= findControlfileBackup_c.untilSCN) AND (needstby is NULL OR nvl(controlfile_type,'B') = decode(needstby, TRUE#, 'S', 'B') OR canConvert_Cf = TRUE#) AND ((typemask = 0 AND bcf.autobackup_date IS NULL) OR -- no autobackups (bitand(typemask, BScfile_all) != 0) OR -- all backups (bcf.autobackup_date IS NOT NULL AND -- only autobackups bitand(typemask, BScfile_auto) != 0)) AND (sourcemask is NULL OR bitand(sourcemask, backupSet_con_t) != 0) AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) ORDER BY toSCN_act desc, stamp_con desc; -------------------- -- SPFILE Backups -- -------------------- CURSOR findSpfileBackup_c( completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,untilTime IN date DEFAULT NULL ,rmanCmd IN number DEFAULT unknownCmd_t) RETURN rcvRec_t IS -- for non-obsolete command SELECT backupSet_con_t type_con, bsf_key key_con, bsf_recid recid_con, bsf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, 0 blocks_con, 0 blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, 0 fromSCN_act, 0 toSCN_act, nvl(modification_time, bs.completion_time) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, to_number(null) dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, db_unique_name sfDbUniqueName_obj FROM bsf, bs, db -- NOTE!! NOTE!! NOTE!! -- If you add/change conditional clause, then make sure you do the -- same change in 'Backup Sets (for report obsolete cmd)' query. 
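  -- (Sketch of the structure, for orientation) This cursor has two
  -- UNION ALL branches selected by rmanCmd: the branch below serves every
  -- command except REPORT OBSOLETE, while the second branch additionally
  -- joins an aggregate of bp that counts the distinct AVAILABLE pieces per
  -- set on allocated devices and requires bs.pieces = bp.pieces, so a
  -- partially available backup set is not returned to the obsolete
  -- computation.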
  WHERE rmanCmd != obsoleteCmd_t
    AND bsf.bs_key = bs.bs_key         -- join bsf and bs
    AND bs.bck_type != 'L'             -- ignore archivelog backups
    AND bs.db_key = this_db_key        -- belongs to this database
    AND bsf.db_key = db.db_key         -- join bsf and db
    AND (findSpfileBackup_c.completedAfter is NULL OR
         bs.completion_time >= findSpfileBackup_c.completedAfter)
    AND (findSpfileBackup_c.completedBefore is NULL OR
         bs.completion_time <= findSpfileBackup_c.completedBefore)
    AND (findSpfileBackup_c.untilTime is NULL OR
         nvl(modification_time,bs.start_time) <=
            findSpfileBackup_c.untilTime)
    AND (rmanCmd != restoreCmd_t OR
         (rmanCmd = restoreCmd_t AND
          (bsf.db_unique_name is NULL OR
           nvl(user_db_unique_name, this_db_unique_name) =
              bsf.db_unique_name)))
    AND (bs.site_key IS NULL OR          -- always return null site_key
         user_site_key = bs.site_key OR  -- user interested in one site
         (user_site_key IS NULL AND      -- return rows per access attr
          (disk_backups_shared = TRUE# OR
           tape_backups_shared = TRUE# OR
           this_site_key = bs.site_key)))
  UNION ALL
  -- for obsolete command
  SELECT backupSet_con_t type_con,
         bsf_key key_con, bsf_recid recid_con, bsf_stamp stamp_con,
         bs.set_stamp setStamp_con, bs.set_count setCount_con,
         bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con,
         bs.bs_key bsKey_con, bs.incr_level bsLevel_con,
         bs.bck_type bsType_con,
         abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con,
         bs.pieces pieceCount_con, to_char(null) fileName_con,
         to_char(null) tag_con, to_number(null) copyNumber_con,
         to_char(null) status_con, 0 blocks_con, 0 blockSize_con,
         to_char(null) deviceType_con, bs.completion_time compTime_con,
         to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con,
         to_date(null) bpCompTime_con, to_char(null) bpCompressed_con,
         multi_section multi_section_con,
         full_act_t type_act, 0 fromSCN_act, 0 toSCN_act,
         nvl(modification_time, bs.completion_time) toTime_act,
         to_number(null) rlgSCN_act, to_date(null) rlgTime_act,
         to_number(null) dbincKey_act, to_number(null) level_act,
         0 section_size_act, to_number(null) dfNumber_obj,
         to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj,
         to_date(null) cfDate_obj, to_number(null) logSequence_obj,
         to_number(null) logThread_obj, to_number(null) logRlgSCN_obj,
         to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj,
         to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj,
         to_date(null) logNextTime_obj, to_char(null) logTerminal_obj,
         to_char(null) cfType_obj,
         bs.keep_options keep_options, bs.keep_until keep_until,
         to_number(null) afzSCN_act, to_date(null) rfzTime_act,
         to_number(null) rfzSCN_act, to_char(null) media_con,
         'NO' isrdf_con, bs.site_key site_key_con,
         0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj,
         0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj,
         to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act,
         to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act,
         db_unique_name sfDbUniqueName_obj
  FROM bsf, bs, db,
       (SELECT bs_key, count(distinct piece#) pieces
        FROM bp
        WHERE rmanCmd = obsoleteCmd_t
          AND bp.db_key = this_db_key    -- this database
          AND bp.status = 'A'
          AND (anyDevice = TRUE# OR
               isDeviceTypeAllocated(bp.device_type) = TRUE#)
          AND ((user_site_key = bp.site_key) OR
               (user_site_key IS NULL AND
                ((disk_backups_shared=TRUE# AND bp.device_type='DISK') OR
                 (tape_backups_shared=TRUE# AND bp.device_type<>'DISK') OR
                 (this_site_key = nvl(bp.site_key, this_site_key)))))
        GROUP BY bs_key, device_type) bp
  -- NOTE!! NOTE!! NOTE!!
  -- If you add/change conditional clause, then make sure you do the
  -- same change in 'Backup Sets (for non-report obsolete cmd)' query.
  WHERE rmanCmd = obsoleteCmd_t
    AND bsf.bs_key = bs.bs_key        -- join bsf and bs
    AND bs.bck_type != 'L'            -- ignore archivelog backups
    AND bs.db_key = this_db_key       -- belongs to this database
    AND bs.bs_key = bp.bs_key         -- join bs and bp
    AND bs.pieces = bp.pieces
    AND bsf.db_key = db.db_key        -- join bsf and db
    AND (findSpfileBackup_c.completedAfter is NULL OR
         bs.completion_time >= findSpfileBackup_c.completedAfter)
    AND (findSpfileBackup_c.completedBefore is NULL OR
         bs.completion_time <= findSpfileBackup_c.completedBefore)
    AND (findSpfileBackup_c.untilTime is NULL OR
         nvl(modification_time,bs.start_time) <=
            findSpfileBackup_c.untilTime)
  ORDER BY toTime_act desc,  -- for finding best backup
           stamp_con desc;   -- to get most recent
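-- Worked example of the elapsed-time expression used above (illustration):
-- Oracle DATE subtraction yields days, so for a backup set with
-- start_time 10:00:00 and completion_time 10:30:00 the difference is
-- 0.0208333... days, and abs(0.0208333... * 86400) = 1800 seconds for
-- elapseSecs_con.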
---------------------
-- Datafile Backup --
---------------------

getDatafileBackupLast rcvRec_t;

CURSOR findDatafileBackup_c(
      sourcemask      IN number
     ,fno             IN number   DEFAULT NULL
     ,crescn          IN number   DEFAULT NULL -- makes sense when fno != NULL
     ,tag             IN varchar2 DEFAULT NULL
     ,pattern         IN varchar2 DEFAULT NULL
     ,reset_scn       IN number   DEFAULT NULL
     ,reset_time      IN date     DEFAULT NULL
     ,level           IN number   DEFAULT NULL
     ,completedAfter  IN date     DEFAULT NULL
     ,completedBefore IN date     DEFAULT NULL
     ,untilSCN        IN number   DEFAULT NULL
     ,statusMask      IN binary_integer DEFAULT BSavailable
            -- ignored for backupset because we didn't join bp
     ,onlyrdf         IN binary_integer DEFAULT 0
            -- return only recovery area files
     ,duplicates      IN number   DEFAULT NULL
     ,onlytc          IN binary_integer DEFAULT FALSE#
            -- return only files that are translated
     ,pluginSCN       IN number   DEFAULT 0
     ) RETURN rcvRec_t IS
  SELECT imageCopy_con_t type_con,
         cdf.cdf_key key_con, cdf.cdf_recid recid_con,
         cdf.cdf_stamp stamp_con, to_number(null) setStamp_con,
         to_number(null) setCount_con, to_number(null) bsRecid_con,
         to_number(null) bsStamp_con, to_number(null) bsKey_con,
         to_number(null) bsLevel_con, to_char(null) bsType_con,
         to_number(null) elapseSecs_con, to_number(null) pieceCount_con,
         cdf.fname fileName_con, cdf.tag tag_con,
         to_number(null) copyNumber_con, cdf.status status_con,
         cdf.blocks blocks_con, cdf.block_size blockSize_con,
         'DISK' deviceType_con, cdf.completion_time compTime_con,
         to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con,
         to_date(null) bpCompTime_con, to_char(null) bpCompressed_con,
         to_char(null) multi_section_con,
         full_act_t type_act, 0 fromSCN_act,
         cdf.ckp_scn toSCN_act, cdf.ckp_time toTime_act,
         dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act,
         cdf.dbinc_key dbincKey_act, cdf.incr_level level_act,
         0 section_size_act, cdf.file# dfNumber_obj,
         cdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj,
         to_date(null) cfDate_obj, to_number(null) logSequence_obj,
         to_number(null) logThread_obj, to_number(null) logRlgSCN_obj,
         to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj,
         to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj,
         to_date(null) logNextTime_obj, to_char(null) logTerminal_obj,
         to_char(null) cfType_obj,
         cdf.keep_options keep_options, cdf.keep_until keep_until,
         cdf.abs_fuzzy_scn afzSCN_act, cdf.rcv_fuzzy_time rfzTime_act,
         cdf.rcv_fuzzy_scn rfzSCN_act, to_char(null) media_con,
         is_recovery_dest_file isrdf_con, site_key site_key_con,
         cdf.foreign_dbid foreignDbid_obj,
         decode(cdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj,
         cdf.plugin_scn pluginSCN_obj,
         cdf.plugin_reset_scn pluginRlgSCN_obj,
         cdf.plugin_reset_time pluginRlgTime_obj,
         to_number(null) newDfCreationSCN_obj,
         decode(cdf.plugged_readonly, 'NO',
                cdf.ckp_scn, cdf.plugin_scn) newToSCN_act,
         decode(cdf.plugin_scn, 0,
                dbinc.reset_scn, cdf.plugin_reset_scn) newRlgSCN_act,
         decode(cdf.plugin_scn, 0,
                dbinc.reset_time, cdf.plugin_reset_time) newRlgTime_act,
         to_char(null) sfDbUniqueName_obj
  FROM cdf, dbinc,
       (SELECT DISTINCT max(cdf_recid) duprecid
        FROM cdf, dbinc
        WHERE (findDatafileBackup_c.tag is NULL OR
               tag = findDatafileBackup_c.tag)
          AND cdf.dbinc_key = dbinc.dbinc_key
          AND dbinc.db_key = this_db_key
          AND (findDatafileBackup_c.pattern is NULL OR
               cdf.fname LIKE
                  replace(replace(findDatafileBackup_c.pattern,
                          '*','**'), '_', '*_') ESCAPE '*')
        GROUP BY cdf.file#, cdf.create_scn, dbinc.reset_scn,
                 dbinc.reset_time, cdf.ckp_time, cdf.ckp_scn,
                 cdf.abs_fuzzy_scn, cdf.rcv_fuzzy_scn, cdf.bck_fuzzy,
                 cdf.onl_fuzzy, dbinc.db_key, cdf.plugin_scn,
                 cdf.plugin_reset_scn, cdf.plugin_reset_time) dup
  WHERE cdf.cdf_recid = dup.duprecid(+)
    AND (sourcemask is NULL OR bitand(sourcemask, imageCopy_con_t) != 0)
    AND (dbinc.db_key = this_db_key)      -- belongs to this database
    AND (dbinc.dbinc_key = cdf.dbinc_key) -- join cdf and dbinc
    AND (findDatafileBackup_c.reset_scn is NULL OR
         canApplyAnyRedo = TRUE# OR
         (cdf.plugged_readonly = 'NO' AND
          findDatafileBackup_c.reset_scn = dbinc.reset_scn AND
          findDatafileBackup_c.reset_time = dbinc.reset_time) OR
         (cdf.plugged_readonly = 'YES' AND
          findDatafileBackup_c.reset_scn = cdf.plugin_reset_scn AND
          findDatafileBackup_c.reset_time = cdf.plugin_reset_time))
    AND cdf.file# = nvl(findDatafileBackup_c.fno, cdf.file#)
    -- The next condition is redundant, as there should never be controlfile
    -- copies in the cdf table, unless we have a bug in resync...
    AND cdf.file# != 0                    -- no ctrl bkps
    AND (onlytc = FALSE# OR tc_database = TRUE# OR
         isTranslatedFno(cdf.file#) = TRUE#) -- only translated files
    AND ((findDatafileBackup_c.pluginSCN = 0 AND cdf.plugin_scn = 0 AND
          cdf.create_scn = findDatafileBackup_c.crescn) OR
         (findDatafileBackup_c.pluginSCN != 0 AND
          cdf.plugin_scn = findDatafileBackup_c.pluginSCN) OR
         (findDatafileBackup_c.pluginSCN = 0 AND
          findDatafileBackup_c.crescn IS NULL))
    AND decode(statusMask, BSavailable,
               decode(status, 'A', TRUE#, FALSE#),
               isStatusMatch(status, statusMask)) = TRUE#
    AND (findDatafileBackup_c.tag is NULL OR
         tag = findDatafileBackup_c.tag)
    AND (findDatafileBackup_c.pattern is NULL OR
         cdf.fname LIKE replace(replace(findDatafileBackup_c.pattern,
                        '*','**'), '_', '*_') ESCAPE '*')
    AND (findDatafileBackup_c.completedAfter is NULL OR
         cdf.completion_time >= findDatafileBackup_c.completedAfter)
    AND (findDatafileBackup_c.completedBefore is NULL OR
         cdf.completion_time <= findDatafileBackup_c.completedBefore)
    AND (findDatafileBackup_c.untilSCN is NULL OR
         (cdf.plugged_readonly = 'NO' AND
          cdf.ckp_scn <= findDatafileBackup_c.untilSCN) OR
         (cdf.plugged_readonly = 'YES' AND
          cdf.plugin_scn <= findDatafileBackup_c.untilSCN))
    AND (findDatafileBackup_c.level is NULL OR
         cdf.incr_level <= findDatafileBackup_c.level)
    AND (findDatafileBackup_c.onlyrdf = 0 OR
         cdf.is_recovery_dest_file = 'YES')
    AND (duplicates IS NULL OR duplicates = TRUE# OR
         (duplicates = FALSE# AND duprecid IS NOT NULL))
    AND ((user_site_key = cdf.site_key) OR
         (user_site_key IS NULL AND
          ((disk_backups_shared = TRUE#) OR
           (this_site_key = nvl(cdf.site_key, this_site_key)))))
  UNION ALL
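  -- (Illustrative note) The "dup" inline view above groups identical
  -- datafile copies (same file#, creation SCN, incarnation, checkpoint,
  -- fuzziness and plugin attributes) and keeps max(cdf_recid) as the
  -- representative of each group; because of the outer join on
  -- cdf_recid = duprecid, passing duplicates = FALSE# keeps only that
  -- representative row, so each distinct copy is reported once.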
  SELECT proxyCopy_con_t type_con,
         xdf.xdf_key key_con, xdf.xdf_recid recid_con,
         xdf.xdf_stamp stamp_con, to_number(null) setStamp_con,
         to_number(null) setCount_con, to_number(null) bsRecid_con,
         to_number(null) bsStamp_con, to_number(null) bsKey_con,
         to_number(null) bsLevel_con, to_char(null) bsType_con,
         to_number(null) elapseSecs_con, to_number(null) pieceCount_con,
         xdf.handle fileName_con, xdf.tag tag_con,
         to_number(null) copyNumber_con, xdf.status status_con,
         xdf.blocks blocks_con, xdf.block_size blockSize_con,
         xdf.device_type deviceType_con, xdf.completion_time compTime_con,
         to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con,
         to_date(null) bpCompTime_con, to_char(null) bpCompressed_con,
         to_char(null) multi_section_con,
         full_act_t type_act, 0 fromSCN_act,
         xdf.ckp_scn toSCN_act, xdf.ckp_time toTime_act,
         dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act,
         xdf.dbinc_key dbincKey_act, xdf.incr_level level_act,
         0 section_size_act, xdf.file# dfNumber_obj,
         xdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj,
         to_date(null) cfDate_obj, to_number(null) logSequence_obj,
         to_number(null) logThread_obj, to_number(null) logRlgSCN_obj,
         to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj,
         to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj,
         to_date(null) logNextTime_obj, to_char(null) logTerminal_obj,
         to_char(null) cfType_obj,
         xdf.keep_options keep_options, xdf.keep_until keep_until,
         xdf.abs_fuzzy_scn afzSCN_act, xdf.rcv_fuzzy_time rfzTime_act,
         xdf.rcv_fuzzy_scn rfzSCN_act, xdf.media media_con,
         'NO' isrdf_con, site_key site_key_con,
         xdf.foreign_dbid foreignDbid_obj,
         decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj,
         xdf.plugin_scn pluginSCN_obj,
         xdf.plugin_reset_scn pluginRlgSCN_obj,
         xdf.plugin_reset_time pluginRlgTime_obj,
         to_number(null) newDfCreationSCN_obj,
         decode(xdf.plugged_readonly, 'NO',
                xdf.ckp_scn, xdf.plugin_scn) newToSCN_act,
         decode(xdf.plugin_reset_scn, 0,
                dbinc.reset_scn, xdf.plugin_reset_scn) newRlgSCN_act,
         nvl(xdf.plugin_reset_time, dbinc.reset_time) newRlgTime_act,
         to_char(null) sfDbUniqueName_obj
  FROM xdf, dbinc
  WHERE (sourcemask is NULL OR bitand(sourcemask, proxyCopy_con_t) != 0)
    AND (findDatafileBackup_c.onlyrdf = 0)
    AND (dbinc.db_key = this_db_key)      -- belongs to this database
    AND (dbinc.dbinc_key = xdf.dbinc_key) -- join xdf and dbinc
    AND (findDatafileBackup_c.reset_scn is NULL OR
         canApplyAnyRedo = TRUE# OR
         (xdf.plugged_readonly = 'NO' AND
          findDatafileBackup_c.reset_scn = dbinc.reset_scn AND
          findDatafileBackup_c.reset_time = dbinc.reset_time) OR
         (xdf.plugged_readonly = 'YES' AND
          findDatafileBackup_c.reset_scn = xdf.plugin_reset_scn AND
          findDatafileBackup_c.reset_time = xdf.plugin_reset_time))
    AND xdf.file# = nvl(findDatafileBackup_c.fno, xdf.file#)
    AND xdf.file# != 0                    -- no ctrl bkps
    AND (onlytc = FALSE# OR tc_database = TRUE# OR
         isTranslatedFno(xdf.file#) = TRUE#) -- only translated files
    AND ((findDatafileBackup_c.pluginSCN = 0 AND xdf.plugin_scn = 0 AND
          xdf.create_scn = findDatafileBackup_c.crescn) OR
         (findDatafileBackup_c.pluginSCN != 0 AND
          xdf.plugin_scn = findDatafileBackup_c.pluginSCN) OR
         (findDatafileBackup_c.pluginSCN = 0 AND
          findDatafileBackup_c.crescn IS NULL))
    AND decode(statusMask, BSavailable,
               decode(xdf.status, 'A', TRUE#, FALSE#),
               isStatusMatch(xdf.status, statusMask)) = TRUE#
    AND (findDatafileBackup_c.tag is NULL OR
         xdf.tag = findDatafileBackup_c.tag)
    AND (findDatafileBackup_c.pattern is NULL OR
         xdf.handle LIKE replace(replace(findDatafileBackup_c.pattern,
                         '*','**'), '_', '*_') ESCAPE '*')
    AND (findDatafileBackup_c.completedAfter is NULL OR
         xdf.completion_time >= findDatafileBackup_c.completedAfter)
    AND (findDatafileBackup_c.completedBefore is NULL OR
         xdf.completion_time <= findDatafileBackup_c.completedBefore)
    AND (findDatafileBackup_c.untilSCN is NULL OR
(xdf.plugged_readonly = 'NO' AND xdf.ckp_scn <= findDatafileBackup_c.untilSCN) OR (xdf.plugged_readonly = 'YES' AND xdf.plugin_scn <= findDatafileBackup_c.untilSCN)) AND (findDatafileBackup_c.level is NULL OR xdf.incr_level <= findDatafileBackup_c.level) AND dbinc.db_key=this_db_key AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) UNION ALL SELECT backupSet_con_t type_con, bdf.bdf_key key_con, bdf.bdf_recid recid_con, bdf.bdf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, bdf.blocks blocks_con, bdf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, decode(bdf.incr_scn, 0, full_act_t, incremental_act_t) type_act, bdf.incr_scn fromSCN_act, bdf.ckp_scn toSCN_act, bdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, bdf.dbinc_key dbincKey_act, bdf.incr_level level_act, bdf.section_size section_size_act, bdf.file# dfNumber_obj, bdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, bdf.abs_fuzzy_scn afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, bdf.foreign_dbid foreignDbid_obj, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, bdf.plugin_scn pluginSCN_obj, bdf.plugin_reset_scn pluginRlgSCN_obj, bdf.plugin_reset_time pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, decode(bdf.plugged_readonly, 'NO', bdf.ckp_scn, bdf.plugin_scn) newToSCN_act, decode(bdf.plugin_scn, 0, dbinc.reset_scn, bdf.plugin_reset_scn) newRlgSCN_act, decode(bdf.plugin_scn, 0, dbinc.reset_time, bdf.plugin_reset_time) newRlgTime_act, to_char(null) sfDbUniqueName_obj -- NOTE!! NOTE!! never change the order of tables. 
-- This will change the execution plan and result in poor performance
-- for the rule-based optimizer.
FROM bs, bdf, dbinc
WHERE (sourcemask is NULL OR bitand(sourcemask, backupSet_con_t) != 0)
  AND (findDatafileBackup_c.onlyrdf = 0)
  AND (dbinc.db_key = this_db_key)      -- belongs to this database
  AND (bs.db_key = this_db_key)         -- belongs to this database
  AND (bdf.dbinc_key = dbinc.dbinc_key) -- join bdf and dbinc
  AND (bdf.bs_key = bs.bs_key)          -- join bdf and bs
  AND bs.bck_type != 'L'                -- only datafile backups
  AND (findDatafileBackup_c.reset_scn is NULL OR canApplyAnyRedo = TRUE# OR (bdf.plugged_readonly = 'NO' AND findDatafileBackup_c.reset_scn = dbinc.reset_scn AND findDatafileBackup_c.reset_time = dbinc.reset_time) OR (bdf.plugged_readonly = 'YES' AND findDatafileBackup_c.reset_scn = bdf.plugin_reset_scn AND findDatafileBackup_c.reset_time = bdf.plugin_reset_time))
  AND bdf.file# = nvl(findDatafileBackup_c.fno, bdf.file#)
  AND bdf.file# != 0                    -- no ctrl bkps
  AND (onlytc = FALSE# OR tc_database = TRUE# OR isTranslatedFno(bdf.file#) = TRUE#) -- only translated files
  AND ((findDatafileBackup_c.pluginSCN = 0 AND bdf.plugin_scn = 0 AND bdf.create_scn = findDatafileBackup_c.crescn) OR (findDatafileBackup_c.pluginSCN != 0 AND bdf.plugin_scn = findDatafileBackup_c.pluginSCN) OR (findDatafileBackup_c.pluginSCN = 0 AND findDatafileBackup_c.crescn IS NULL))
  AND (findDatafileBackup_c.completedAfter is NULL OR bs.completion_time >= findDatafileBackup_c.completedAfter)
  AND (findDatafileBackup_c.completedBefore is NULL OR bs.completion_time <= findDatafileBackup_c.completedBefore)
  AND (findDatafileBackup_c.untilSCN is NULL OR (bdf.plugged_readonly = 'NO' AND bdf.ckp_scn <= findDatafileBackup_c.untilSCN) OR (bdf.plugged_readonly = 'YES' AND bdf.plugin_scn <= findDatafileBackup_c.untilSCN))
  AND (findDatafileBackup_c.level is NULL OR bdf.incr_level <= findDatafileBackup_c.level)
  AND (bs.site_key IS NULL OR           -- always return null site_key
       user_site_key = bs.site_key OR   -- user interested in one site
       (user_site_key IS NULL AND       -- return rows per access attr
        (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key)))
ORDER BY dfNumber_obj,                  -- dfNumber_obj
         newRlgSCN_act desc,            -- rlgSCN_act, last incarnation first
         newRlgTime_act desc,           -- rlgTime_act
         newToSCN_act desc,             -- toSCN_act
         stamp_con desc;                -- stamp_con

CURSOR findDatafileCopyKey( copyKey IN number ,statusMask IN binary_integer) RETURN rcvRec_t IS
-- Replaces these cursors: cursor in translateDataFileCopyKey
SELECT imageCopy_con_t type_con, cdf_key key_con, cdf_recid recid_con, cdf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, fname fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, blocks blocks_con, block_size blockSize_con, 'DISK' deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, cdf.ckp_scn toSCN_act, cdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, cdf.dbinc_key dbincKey_act, incr_level level_act, 0 section_size_act, file# dfNumber_obj, create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj,
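-- NOTE: the decode(statusMask, BSavailable, ...) test used by this cursor
-- (and by the other cursors in this section) special-cases the common
-- BSavailable mask: a record then qualifies exactly when status = 'A', and
-- only other masks fall through to the general isStatusMatch() function.
-- Illustrative reading, with hypothetical values:
--   statusMask = BSavailable, status = 'A'  =>  TRUE#   (row kept)
--   statusMask = BSavailable, status = 'D'  =>  FALSE#  (row filtered)
--   any other statusMask                    =>  isStatusMatch(status, statusMask)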
to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, cdf.keep_options keep_options, cdf.keep_until keep_until, cdf.abs_fuzzy_scn afzSCN_act, cdf.rcv_fuzzy_time rfzTime_act, cdf.rcv_fuzzy_scn rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, cdf.foreign_dbid foreignDbid_obj, decode(cdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, cdf.plugin_scn pluginSCN_obj, cdf.plugin_reset_scn pluginRlgSCN_obj, cdf.plugin_reset_time pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM cdf, dbinc WHERE dbinc.db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = cdf.dbinc_key -- join cdf and dbinc AND (findDatafileCopyKey.copyKey = cdf_key) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) ORDER BY dfNumber_obj, -- for duplicate filtering decode(pluggedRonly_obj, 0, toSCN_act, pluginSCN_obj) desc, -- for tag translation stamp_con desc; -- to get most recent CURSOR findControlFileCopyKey( copyKey IN number ,statusMask IN binary_integer) RETURN rcvRec_t IS -- Replaces these cursors: cursor in translateControlFileCopyKey SELECT imageCopy_con_t type_con, ccf_key key_con, ccf_recid recid_con, ccf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, fname fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, to_number(null) blocks_con, block_size blockSize_con, 'DISK' deviceType_con, completion_time compTime_con, create_time cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, ccf.ckp_scn toSCN_act, ccf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, ccf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, ccf.keep_options keep_options, ccf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM ccf, dbinc WHERE dbinc.db_key = this_db_key -- belongs to this 
-- database
  AND dbinc.dbinc_key = ccf.dbinc_key   -- join ccf and dbinc
  AND (findControlFileCopyKey.copyKey = ccf_key)
  AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE#
  AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key)))))
ORDER BY toSCN_act desc,                -- for tag translation
         stamp_con desc;                -- to get most recent

CURSOR findBackupsetFiles( bskey IN number) RETURN rcvRec_t IS
-- datafiles
SELECT backupSet_con_t type_con, bdf_key key_con, bdf_recid recid_con, bdf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, bdf.blocks blocks_con, bdf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, decode(bdf.incr_scn, 0, full_act_t, incremental_act_t) type_act, bdf.incr_scn fromSCN_act, bdf.ckp_scn toSCN_act, bdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, bdf.dbinc_key dbincKey_act, bdf.incr_level level_act, bdf.section_size section_size_act, file# dfNumber_obj, create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, bdf.abs_fuzzy_scn afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, bdf.foreign_dbid foreignDbid_obj, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, bdf.plugin_scn pluginSCN_obj, bdf.plugin_reset_scn pluginRlgSCN_obj, bdf.plugin_reset_time pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM bdf, bs, dbinc
WHERE (allIncarnations = TRUE# OR canApplyAnyRedo = TRUE# OR dbinc.dbinc_key = this_dbinc_key)
  AND dbinc.db_key = this_db_key        -- belongs to this database
  AND dbinc.dbinc_key = bdf.dbinc_key   -- join bdf and dbinc
  AND bdf.bs_key = bs.bs_key            -- join bdf and bs
  AND bs.bs_key = bskey
  AND bs.bck_type != 'L'                -- only datafile backups
  AND (bs.site_key IS NULL OR           -- always return null site_key
       user_site_key = bs.site_key OR   -- user interested in one site
       (user_site_key IS NULL AND       -- return rows per access attr
        (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key)))
UNION ALL
-- controlfile
SELECT backupSet_con_t type_con, bcf_key key_con, bcf_recid recid_con, bcf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con,
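-- NOTE: elapseSecs_con above relies on Oracle DATE arithmetic: subtracting
-- two DATEs yields a difference in days, so multiplying by 86400 (seconds
-- per day) converts it to seconds.  For example (hypothetical times), a
-- backup set started at 10:00:00 and completed at 10:03:30 gives
-- (completion_time - start_time) * 86400 = 0.002430... * 86400 = 210
-- seconds; abs() merely guards against a negative difference.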
bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, nvl(bcf.blocks,0) blocks_con, bcf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, bcf.ckp_scn toSCN_act, bcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, bcf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, bcf.autobackup_sequence cfSequence_obj, bcf.autobackup_date cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bcf, bs, dbinc WHERE (allIncarnations = TRUE# OR canApplyAnyRedo = TRUE# OR dbinc.dbinc_key = this_dbinc_key) AND dbinc.db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = bcf.dbinc_key -- join bcf and dbinc AND bcf.bs_key = bs.bs_key -- join bcf and bs AND bs.bs_key = bskey AND bs.bck_type != 'L' -- ignore archivelog backups AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) UNION ALL -- SPFILE SELECT backupSet_con_t type_con, bsf_recid key_con, bsf_recid recid_con, bsf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, 0 blocks_con, 0 blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, 0 fromSCN_act, 0 toSCN_act, modification_time toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, to_number(null) dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, 
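-- NOTE (assumption about the bck_type codes): bs.bck_type distinguishes
-- what a backup set contains -- 'D' for full datafile backups, 'I' for
-- incremental backups, 'L' for archived log backups -- which is why the
-- datafile, controlfile and SPFILE branches of this cursor require
-- bck_type != 'L' while the archived log branch requires bck_type = 'L'.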
to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bsf, bs, db WHERE bsf.bs_key = bs.bs_key -- join bsf and bs AND bs.db_key = this_db_key -- belongs to this database AND bsf.db_key = db.db_key -- join bsf and db AND bs.bs_key = bskey AND bs.bck_type != 'L' -- ignore archivelog backups AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) UNION ALL -- archived logs SELECT backupSet_con_t type_con, brl.brl_key key_con, brl.brl_recid recid_con, brl.brl_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, to_number(null) bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, brl.blocks blocks_con, brl.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, brl.sequence# logSequence_obj, brl.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, brl.low_scn logLowSCN_obj, brl.low_time logLowTime_obj, brl.next_scn logNextSCN_obj, brl.next_time logNextTime_obj, brl.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM brl, bs, dbinc WHERE (allIncarnations = TRUE# OR canApplyAnyRedo = TRUE# OR dbinc.dbinc_key = this_dbinc_key) AND dbinc.db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = brl.dbinc_key -- join brl and dbinc AND brl.bs_key = bs.bs_key -- join brl and bs AND bs.bs_key = bskey AND bs.bck_type = 'L' -- only archivelog backups AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = 
bs.site_key)))
-- For terminal logs, records marked 'YES' must be first, 'NO' last
ORDER BY dfNumber_obj, logThread_obj, logSequence_obj, logTerminal_obj desc;

------------------
-- Proxy Copies --
------------------

CURSOR findProxyCopy( tag IN varchar2 DEFAULT NULL ,handle IN varchar2 DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,statusMask IN binary_integer) RETURN rcvRec_t IS
SELECT proxyCopy_con_t type_con, xdf_key key_con, xdf_recid recid_con, xdf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, blocks blocks_con, block_size blockSize_con, device_type deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xdf.ckp_scn toSCN_act, xdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, xdf.dbinc_key dbincKey_act, incr_level level_act, 0 section_size_act, file# dfNumber_obj, create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, xdf.keep_options keep_options, xdf.keep_until keep_until, xdf.abs_fuzzy_scn afzSCN_act, xdf.rcv_fuzzy_time rfzTime_act, xdf.rcv_fuzzy_scn rfzSCN_act, xdf.media media_con, 'NO' isrdf_con, site_key site_key_con, xdf.foreign_dbid foreignDbid_obj, decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, xdf.plugin_scn pluginSCN_obj, xdf.plugin_reset_scn pluginRlgSCN_obj, xdf.plugin_reset_time pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM xdf, dbinc
WHERE dbinc.db_key = this_db_key        -- belongs to this database
  AND dbinc.dbinc_key = xdf.dbinc_key   -- join xdf and dbinc
  AND (findProxyCopy.tag IS NULL OR findProxyCopy.tag = tag)
  AND (findProxyCopy.handle IS NULL OR findProxyCopy.handle = handle)
  AND (findProxyCopy.deviceType IS NULL OR findProxyCopy.deviceType = device_type)
  AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE#
  AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key)))))
UNION ALL
SELECT proxyCopy_con_t type_con, xcf_key key_con, xcf_recid recid_con, xcf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, to_number(null) blocks_con, -- xcf doesn't have blocks
block_size blockSize_con, device_type deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null)
pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xcf.ckp_scn toSCN_act, xcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, xcf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, xcf.keep_options keep_options, xcf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xcf, dbinc WHERE db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = xcf.dbinc_key -- join dbinc and xcf AND (findProxyCopy.tag IS NULL OR findProxyCopy.tag = tag) AND (findProxyCopy.handle IS NULL OR findProxyCopy.handle = handle) AND (findProxyCopy.deviceType IS NULL OR findProxyCopy.deviceType = device_type) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))))) UNION ALL SELECT proxyCopy_con_t type_con, xal_key key_con, xal_recid recid_con, xal_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, xal.status status_con, xal.blocks blocks_con, xal.block_size blockSize_con, xal.device_type deviceType_con, xal.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, redo_act_t type_act, 0 fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, -1 dfNumber_obj, -- to sort last to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, xal.sequence# logSequence_obj, xal.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, xal.low_scn logLowSCN_obj, xal.low_time logLowTime_obj, xal.next_scn logNextSCN_obj, xal.next_time logNextTime_obj, xal.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) 
newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xal, dbinc WHERE db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = xal.dbinc_key -- join dbinc and xal -- NOTE !!! why all incarnation always, what is it used for ? AND (findProxyCopy.tag IS NULL OR findProxyCopy.tag = tag) AND (findProxyCopy.handle IS NULL OR findProxyCopy.handle = handle) AND (findProxyCopy.deviceType IS NULL OR findProxyCopy.deviceType = device_type) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key))))) ORDER BY dfnumber_obj; CURSOR findProxyCopyKey( key IN number DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,statusMask IN binary_integer) RETURN rcvRec_t IS SELECT proxyCopy_con_t type_con, xdf_key key_con, xdf_recid recid_con, xdf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, blocks blocks_con, block_size blockSize_con, device_type deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xdf.ckp_scn toSCN_act, xdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, xdf.dbinc_key dbincKey_act, incr_level level_act, 0 section_size_act, file# dfNumber_obj, create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, xdf.keep_options keep_options, xdf.keep_until keep_until, xdf.abs_fuzzy_scn afzSCN_act, xdf.rcv_fuzzy_time rfzTime_act, xdf.rcv_fuzzy_scn rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, xdf.foreign_dbid foreignDbid_obj, decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, xdf.plugin_scn pluginSCN_obj, xdf.plugin_reset_scn pluginRlgSCN_obj, xdf.plugin_reset_time pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xdf, dbinc WHERE dbinc.db_key = this_db_key -- belongs to this database AND dbinc.dbinc_key = xdf.dbinc_key -- join xdf and dbinc AND (findProxyCopyKey.key = xdf_key) AND (findProxyCopyKey.deviceType IS NULL OR findProxyCopyKey.deviceType = device_type) AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) UNION ALL SELECT proxyCopy_con_t type_con, xcf_key key_con, xcf_recid recid_con, xcf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) 
bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, to_number(null) blocks_con, -- xcf doesn't have blocks
block_size blockSize_con, device_type deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xcf.ckp_scn toSCN_act, xcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, xcf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(controlfile_type, 'B') cfType_obj, xcf.keep_options keep_options, xcf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM xcf, dbinc
WHERE db_key = this_db_key              -- belongs to this database
  AND dbinc.dbinc_key = xcf.dbinc_key   -- join dbinc and xcf
  AND (findProxyCopyKey.key = xcf_key)
  AND (findProxyCopyKey.deviceType IS NULL OR findProxyCopyKey.deviceType = device_type)
  AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE#
  AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key)))))
UNION ALL
SELECT proxyCopy_con_t type_con, xal_key key_con, xal_recid recid_con, xal_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, xal.status status_con, xal.blocks blocks_con, xal.block_size blockSize_con, xal.device_type deviceType_con, xal.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, redo_act_t type_act, 0 fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, xal.sequence# logSequence_obj, xal.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, xal.low_scn logLowSCN_obj, xal.low_time logLowTime_obj, xal.next_scn logNextSCN_obj, xal.next_time logNextTime_obj, xal.terminal logTerminal_obj,
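-- NOTE: the three UNION ALL branches of this cursor mirror the three
-- proxy-copy record types -- xdf (proxy datafile copies), xcf (proxy
-- controlfile copies) and xal (proxy archived log copies) -- so a single
-- key lookup can resolve a proxy copy of any file type.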
to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM xal, dbinc
WHERE db_key = this_db_key              -- belongs to this database
  AND dbinc.dbinc_key = xal.dbinc_key   -- join dbinc and xal
  AND (findProxyCopyKey.key = xal_key)
  AND (findProxyCopyKey.deviceType IS NULL OR findProxyCopyKey.deviceType = device_type)
  AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key)))))
  AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE#;

------------------------
-- Archivelog Backups --
------------------------

-- For 11g RMAN (client_site_aware is non-zero), we know which log belongs
-- to which site, and we use that information to decide which rows are
-- returned.  For older versions of the RMAN client, the needstby flag is
-- used as usual.  Ideally, instead of the needstby flag, we would identify
-- which logs belong to this database site; since we don't have that
-- capability for pre-11 RMAN clients, needstby is used.
CURSOR findArchivedLogCopy( currentIncarnation IN number ,thread IN number ,sequence IN number ,lowSCN IN number ,pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer ,needstby IN number DEFAULT NULL) RETURN rcvRec_t IS
-- Replaces these cursors: lcal
SELECT imageCopy_con_t type_con, al_key key_con, recid recid_con, stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, name fileName_con, to_date(null) tag_con, to_number(null) copyNumber_con, status status_con, blocks blocks_con, block_size blockSize_con, 'DISK' deviceType_con, completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, sequence# logSequence_obj, thread# logThread_obj, resetlogs_change# logRlgSCN_obj, resetlogs_time logRlgTime_obj, first_change# logLowSCN_obj, first_time logLowTime_obj, next_change# logNextSCN_obj, next_time logNextTime_obj, terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null)
newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM rc_archived_log
WHERE db_key = this_db_key              -- belongs to this database
  AND (findArchivedLogCopy.currentIncarnation = FALSE# OR canApplyAnyRedo = TRUE# OR this_dbinc_key = dbinc_key)
  AND (thread IS NULL OR thread# = thread)
  AND (sequence IS NULL OR sequence# = sequence)
  AND (lowSCN IS NULL OR first_change# = lowSCN)
  -- Pattern isn't really needed here.  The LIKE clause should be
  -- handled during name translation by krmkaltr.  However, krmg.y
  -- allows the LIKE clause as a listQualifier, so we need to support
  -- it for that bizarre case.
  AND (pattern IS NULL OR name LIKE pattern)
  AND (completedAfter IS NULL OR completion_time >= completedAfter)
  AND (completedBefore IS NULL OR completion_time <= completedBefore)
  AND decode(statusMask, BSavailable, decode(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE#
  AND ((client_site_aware = TRUE# AND ((user_site_key = site_key) OR -- interested in specific site
        (user_site_key IS NULL AND ((logs_shared = TRUE#) OR (this_site_key = nvl(site_key, this_site_key)))))) OR
       (client_site_aware = FALSE# AND (needstby is NULL OR nvl(is_standby, 'NO') = decode(needstby, TRUE#, 'YES', 'NO') OR (terminal = 'YES') OR (first_change# >= lbacked_al_next_scn AND first_change# <= standby_became_primary_scn))))
  -- bug-1479780: translate should return only archived logs and not
  -- online logs
  AND (archived = 'YES')
  AND (tc_thread IS NULL OR thread# = tc_thread)
  AND (tc_fromSeq IS NULL OR sequence# >= tc_fromSeq)
  AND (tc_toSeq IS NULL OR sequence# <= tc_toSeq)
  AND (tc_fromSCN IS NULL OR next_change# > tc_fromSCN)
  AND (tc_toSCN IS NULL OR first_change# < tc_toSCN)
  AND (tc_pattern IS NULL OR name like tc_pattern)
  AND (tc_fromTime IS NULL OR next_time > tc_fromTime)
  AND (tc_toTime IS NULL OR first_time <= tc_toTime)
ORDER BY resetlogs_change#, resetlogs_time, thread#, sequence#, terminal desc, stamp_con desc;

CURSOR findArcLogBackup( sourcemask IN number ,currentIncarnation IN number DEFAULT TRUE# ,thread IN number ,sequence IN number ,lowSCN IN number ,tag IN varchar2 DEFAULT NULL ,pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable) RETURN rcvRec_t IS
SELECT backupSet_con_t type_con, brl.brl_key key_con, brl.brl_recid recid_con, brl.brl_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, to_number(null) bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, brl.blocks blocks_con, brl.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, brl.sequence# logSequence_obj, brl.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj,
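-- NOTE (assumption about the tc_* names): tc_thread, tc_fromSeq/tc_toSeq,
-- tc_fromSCN/tc_toSCN, tc_fromTime/tc_toTime and tc_pattern appear to be
-- package-level translation-criteria variables set before the cursor is
-- opened; whenever a criterion is NULL, its predicate collapses to TRUE
-- and imposes no restriction on the rows returned.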
dbinc.reset_time logRlgTime_obj, brl.low_scn logLowSCN_obj, brl.low_time logLowTime_obj, brl.next_scn logNextSCN_obj, brl.next_time logNextTime_obj, brl.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM brl, bs, dbinc WHERE (sourcemask is NULL OR bitand(sourcemask, backupSet_con_t) != 0) AND dbinc.db_key = this_db_key -- belongs to this database AND (currentIncarnation = FALSE# OR canApplyAnyRedo = TRUE# OR this_dbinc_key = dbinc.dbinc_key) AND (thread IS NULL OR brl.thread# = thread) AND (sequence IS NULL OR brl.sequence# = sequence) AND (lowSCN IS NULL OR brl.low_scn = lowSCN) AND dbinc.dbinc_key = brl.dbinc_key -- join dbinc, brl AND bs.bs_key = brl.bs_key -- join bs, brl AND bs.bck_type = 'L' -- only archivelog backups -- LIKE as a listOperand is not allowed for LIST BACKUP, so -- pattern is ignored here. AND (completedAfter IS NULL OR bs.completion_time >= completedAfter) AND (completedBefore IS NULL OR bs.completion_time <= completedBefore) AND (tc_thread IS NULL OR brl.thread# = tc_thread) AND (tc_fromSeq IS NULL OR brl.sequence# >= tc_fromSeq) AND (tc_toSeq IS NULL OR brl.sequence# <= tc_toSeq) AND (tc_fromSCN IS NULL OR brl.next_scn > tc_fromSCN) AND (tc_toSCN IS NULL OR brl.low_scn < tc_toSCN) AND (tc_fromTime IS NULL OR brl.next_time > tc_fromTime) AND (tc_toTime IS NULL OR brl.low_time <= tc_toTime) AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) UNION ALL -- Proxy ArchivedLog Backups SELECT proxyCopy_con_t type_con, xal_key key_con, xal_recid recid_con, xal_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, xal.status status_con, xal.blocks blocks_con, xal.block_size blockSize_con, xal.device_type deviceType_con, xal.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, redo_act_t type_act, 0 fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, xal.sequence# logSequence_obj, xal.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, xal.low_scn logLowSCN_obj, xal.low_time logLowTime_obj, xal.next_scn logNextSCN_obj, xal.next_time logNextTime_obj, xal.terminal logTerminal_obj, to_char(null) cfType_obj, xal.keep_options keep_options, xal.keep_until keep_until, 
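-- NOTE: the SCN range tests are deliberately asymmetric (next_scn >
-- tc_fromSCN but low_scn < tc_toSCN), so a log qualifies whenever the
-- half-open SCN interval it covers, [low_scn, next_scn), overlaps the
-- requested range.  With hypothetical values: a log with low_scn = 100 and
-- next_scn = 200 qualifies for tc_fromSCN = 150, tc_toSCN = 400 (it
-- supplies SCNs 150..199) but not for tc_fromSCN = 200, because SCN 200
-- belongs to the next log.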
to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM xal, dbinc
WHERE (sourcemask is NULL OR bitand(sourcemask, proxyCopy_con_t) != 0)
  AND dbinc.db_key = this_db_key        -- belongs to this database
  AND (currentIncarnation = FALSE# OR canApplyAnyRedo = TRUE# OR this_dbinc_key = dbinc.dbinc_key)
  AND (thread IS NULL OR xal.thread# = thread)
  AND (sequence IS NULL OR xal.sequence# = sequence)
  AND (lowSCN IS NULL OR xal.low_scn = lowSCN)
  AND dbinc.dbinc_key = xal.dbinc_key   -- join dbinc, xal
  AND decode(statusMask, BSavailable, decode(xal.status, 'A', TRUE#, FALSE#), isStatusMatch(xal.status, statusMask)) = TRUE#
  AND (findArcLogBackup.tag is NULL OR tag = findArcLogBackup.tag)
  AND (findArcLogBackup.pattern IS NULL OR xal.handle LIKE findArcLogBackup.pattern)
  AND (completedAfter IS NULL OR xal.completion_time >= completedAfter)
  AND (completedBefore IS NULL OR xal.completion_time <= completedBefore)
  AND (tc_thread IS NULL OR xal.thread# = tc_thread)
  AND (tc_fromSeq IS NULL OR xal.sequence# >= tc_fromSeq)
  AND (tc_toSeq IS NULL OR xal.sequence# <= tc_toSeq)
  AND (tc_fromSCN IS NULL OR xal.next_scn > tc_fromSCN)
  AND (tc_toSCN IS NULL OR xal.low_scn < tc_toSCN)
  AND (tc_fromTime IS NULL OR xal.next_time > tc_fromTime)
  AND (tc_toTime IS NULL OR xal.low_time <= tc_toTime)
  AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key)))))
-- latest incarnation last, as recovery needs the logs in that order
ORDER BY logRlgSCN_obj, logRlgTime_obj, logThread_obj, logSequence_obj, logTerminal_obj desc, stamp_con desc;

-- This one is different from findArcLogBackup because it uses the index
-- brl_i_dts (i.e. dbinc_key, thread, sequence).  Used only for restoring
-- archived logs.
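-- NOTE: the BETWEEN predicates on thread#, sequence# and low_scn below are
-- presumably what let the optimizer drive this cursor through brl_i_dts as
-- an index range scan, rather than probing one (thread, sequence) pair per
-- open as findArcLogBackup does; a caller that wants a single log can still
-- pass minthread = maxthread and minsequence = maxsequence.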
CURSOR findRangeArcLogBackup( sourcemask IN number ,currentIncarnation IN number DEFAULT TRUE# ,minthread IN number ,minsequence IN number ,minlowSCN IN number ,maxthread IN number ,maxsequence IN number ,maxlowSCN IN number ,tag IN varchar2 DEFAULT NULL ,pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable) RETURN rcvRec_t IS SELECT backupSet_con_t type_con, brl.brl_key key_con, brl.brl_recid recid_con, brl.brl_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, to_number(null) bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, brl.blocks blocks_con, brl.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, brl.sequence# logSequence_obj, brl.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, brl.low_scn logLowSCN_obj, brl.low_time logLowTime_obj, brl.next_scn logNextSCN_obj, brl.next_time logNextTime_obj, brl.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM brl, bs, dbinc WHERE (sourcemask is NULL OR bitand(sourcemask, backupSet_con_t) != 0) AND dbinc.db_key = this_db_key -- belongs to this database AND (currentIncarnation = FALSE# OR canApplyAnyRedo = TRUE# OR this_dbinc_key = dbinc.dbinc_key) AND brl.thread# between minthread and maxthread AND brl.sequence# between minsequence and maxsequence AND brl.low_scn between minlowSCN and maxlowSCN AND dbinc.dbinc_key = brl.dbinc_key -- join dbinc, brl AND bs.bs_key = brl.bs_key -- join bs, brl AND bs.bck_type = 'L' -- only archivelog backups -- LIKE as a listOperand is not allowed for LIST BACKUP, so -- pattern is ignored here. 
AND (completedAfter IS NULL OR bs.completion_time >= completedAfter)
  AND (completedBefore IS NULL OR bs.completion_time <= completedBefore)
  AND (bs.site_key IS NULL OR           -- always return null site_key
       user_site_key = bs.site_key OR   -- user interested in one site
       (user_site_key IS NULL AND       -- return rows per access attr
        (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key)))
UNION ALL
-- Proxy ArchivedLog Backups
SELECT proxyCopy_con_t type_con, xal_key key_con, xal_recid recid_con, xal_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, handle fileName_con, tag tag_con, to_number(null) copyNumber_con, xal.status status_con, xal.blocks blocks_con, xal.block_size blockSize_con, xal.device_type deviceType_con, xal.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, redo_act_t type_act, 0 fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, xal.sequence# logSequence_obj, xal.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, xal.low_scn logLowSCN_obj, xal.low_time logLowTime_obj, xal.next_scn logNextSCN_obj, xal.next_time logNextTime_obj, xal.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj
FROM xal, dbinc
WHERE (sourcemask is NULL OR bitand(sourcemask, proxyCopy_con_t) != 0)
  AND dbinc.db_key = this_db_key        -- belongs to this database
  AND (currentIncarnation = FALSE# OR canApplyAnyRedo = TRUE# OR this_dbinc_key = dbinc.dbinc_key)
  AND xal.thread# between minthread and maxthread
  AND xal.sequence# between minsequence and maxsequence
  AND xal.low_scn between minlowSCN and maxlowSCN
  AND dbinc.dbinc_key = xal.dbinc_key   -- join dbinc, xal
  AND decode(statusMask, BSavailable, decode(xal.status, 'A', TRUE#, FALSE#), isStatusMatch(xal.status, statusMask)) = TRUE#
  AND (findRangeArcLogBackup.tag is NULL OR tag = findRangeArcLogBackup.tag)
  AND (findRangeArcLogBackup.pattern IS NULL OR xal.handle LIKE findRangeArcLogBackup.pattern)
  AND (completedAfter IS NULL OR xal.completion_time >= completedAfter)
  AND (completedBefore IS NULL OR xal.completion_time <= completedBefore)
  AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key)))))
-- return the latest redo first so that, after being pushed onto
-- rcvRecStack, it is popped off as the last entry
ORDER BY logRlgSCN_obj desc, logRlgTime_obj desc, logLowSCN_obj desc,
         logTerminal_obj desc,          -- records marked 'YES' must be first
         stamp_con
desc; CURSOR findAllBackupPiece( backupType IN binary_integer ,tag IN varchar2 ,statusMask IN binary_integer ,completedAfter IN date ,completedBefore IN date ,onlyrdf IN binary_integer) RETURN rcvRec_t IS SELECT backupset_con_t type_con, bp.bp_key key_con, bp.bp_recid recid_con, bp.bp_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, bp.handle fileName_con, bp.tag tag_con, bp.copy# copyNumber_con, bp.status status_con, ceil(bp.bytes / bs.block_size) blocks_con, bs.block_size blockSize_con, bp.device_type deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, bp.piece# pieceNumber_con, bp.completion_time bpCompTime_con, bp.compressed bpCompressed_con, multi_section multi_section_con, to_number(null) type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, to_number(null) dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, bp.media media_con, is_recovery_dest_file isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bp, bs WHERE (bp.bs_key = bs.bs_key) -- join bp and bs AND (bs.db_key = this_db_key) -- this database AND (completedAfter IS NULL OR bs.completion_time >= completedAfter) AND (completedBefore IS NULL OR bs.completion_time <= completedBefore) AND (findAllBackupPiece.tag IS NULL or bp.tag = findAllBackupPiece.tag) AND (anyDevice = TRUE# OR isDeviceTypeAllocated(bp.device_type) = TRUE#) AND decode(statusMask, BSavailable, decode(bp.status, 'A', TRUE#, FALSE#), isStatusMatch(bp.status, statusMask)) = TRUE# AND (findAllBackupPiece.backupType IS NULL OR isBackupTypeMatch(bs.bck_type, backupType) = TRUE#) AND (findAllBackupPiece.onlyrdf = 0 OR bp.is_recovery_dest_file = 'YES') AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) -- the order here is important ORDER BY bs.bs_key, bp.device_type, bp.tag, bp.copy#, bp.piece#; --------------------------- -- Backup Set Validation -- --------------------------- getValidBackupSetLast validBackupSetRec_t; getValidBackupSetCursor varchar2(30); -- to indicate what cursor was used -- This cursor returns the device_type(s), tags, and copy#'s for which all -- backup pieces of the specified backup set are available. 
A code field
-- is also provided which indicates whether the ORDER BY included all
-- 3 fields (1), or just device_type and tag (2), or just device_type (3).
-- NOTE: If you change the ORDER BY in the cursor, you probably need
-- to change getValidBackupSet, cacheFindValidBackupSet and
-- cacheGetValidBackupSet. It depends on the order-by.
CURSOR findValidBackupSet_c(
      bsKey      IN number
     ,pieceCount IN number
     ,deviceType IN varchar2 DEFAULT NULL
     ,tag        IN varchar2 DEFAULT NULL
     ,mask       IN binary_integer)
     RETURN dbms_rcvman.validBackupSetRec_t IS
-- N.B.
-- partial_avail means the backup set is unusable either because some
-- pieces are unavailable, expired or total backuppieces < existing
-- pieces. In other words the backupset is INCOMPLETE.
-- A partial_avail backupset can be made available by doing crosscheck,
-- catalog backuppiece(TBD), making pieces available.
-- Group by device_type, tag, and copy#. This way, we can see if there is
-- a set of pieces with the same copy# and tag.
     SELECT device_type, tag, copy#, 1
     FROM rc_backup_piece
     WHERE bs_key = findValidBackupSet_c.bsKey
       AND decode(mask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, mask)) = TRUE#
       AND (findValidBackupSet_c.tag IS NULL OR
            findValidBackupSet_c.tag = tag)
       AND (findValidBackupSet_c.deviceType IS NULL OR
            findValidBackupSet_c.deviceType = device_type)
       AND ((user_site_key = rc_backup_piece.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND
               rc_backup_piece.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND
               rc_backup_piece.device_type <> 'DISK') OR
              (this_site_key = nvl(rc_backup_piece.site_key,
                                   this_site_key)))))
     GROUP BY device_type, tag, copy#
     HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
              count(DISTINCT piece#) = findValidBackupSet_c.pieceCount) OR
             (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
              count(DISTINCT piece#) <= findValidBackupSet_c.pieceCount))
     UNION ALL
     -- Allow a mix of copy numbers, but still the same tag. It is possible
     -- that the backup set is available only if pieces with different copy#'s
     -- are combined to form the complete set.
     SELECT device_type, tag, to_number(null), 2
     FROM rc_backup_piece
     WHERE bs_key = findValidBackupSet_c.bsKey
       AND decode(mask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, mask)) = TRUE#
       AND (findValidBackupSet_c.tag IS NULL OR
            findValidBackupSet_c.tag = tag)
       AND (findValidBackupSet_c.deviceType IS NULL OR
            findValidBackupSet_c.deviceType = device_type)
       AND ((user_site_key = rc_backup_piece.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND
               rc_backup_piece.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND
               rc_backup_piece.device_type <> 'DISK') OR
              (this_site_key = nvl(rc_backup_piece.site_key,
                                   this_site_key)))))
     GROUP BY device_type, tag
     HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
              count(DISTINCT piece#) = findValidBackupSet_c.pieceCount) OR
             (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
              count(DISTINCT piece#) <= findValidBackupSet_c.pieceCount))
     UNION ALL
     -- Allow a mix of tags. It is possible that the backup set is available
     -- only if we combine pieces with different tags. Since we are mixing
     -- tags, we are definitely also going to be mixing copy#s because any set
     -- of pieces composed of pieces with different tags must also have
     -- different copy#s. Note that this is really moot until BACKUP BACKUP
     -- PIECE is implemented. Until then all pieces of a backup set will have
     -- the same tag, or no tag at all.
     SELECT device_type, to_char(null), to_number(null), 3
     FROM rc_backup_piece
     WHERE bs_key = findValidBackupSet_c.bsKey
       AND decode(mask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, mask)) = TRUE#
       AND (findValidBackupSet_c.tag IS NULL OR
            findValidBackupSet_c.tag = tag)
       AND (findValidBackupSet_c.deviceType IS NULL OR
            findValidBackupSet_c.deviceType = device_type)
       AND ((user_site_key = rc_backup_piece.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND
               rc_backup_piece.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND
               rc_backup_piece.device_type <> 'DISK') OR
              (this_site_key = nvl(rc_backup_piece.site_key,
                                   this_site_key)))))
     GROUP BY device_type
     HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
              count(DISTINCT piece#) = findValidBackupSet_c.pieceCount) OR
             (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
              count(DISTINCT piece#) <= findValidBackupSet_c.pieceCount))
     ORDER BY 1,2,3,4;

----------------------------------------------------------
-- findValidBackupSet_c optimized for 1 piece backupset --
----------------------------------------------------------

CURSOR findValidBackupSet1P_c(
      bsKey      IN number
     ,pieceCount IN number
     ,deviceType IN varchar2 DEFAULT NULL
     ,tag        IN varchar2 DEFAULT NULL
     ,mask       IN binary_integer)
     RETURN validBackupSetRec_t IS
-- N.B.
-- We don't need to check partial_avail mask because there is just
-- one piece in this backupset. It just has to satisfy the
-- statusMask provided.
-- We also don't need to group by
-- o device type because the 1 piece should completely exist on one device
-- o tag because the 1 piece should have the tag specified
-- o copy# because each piece has distinct copy number
--
     SELECT device_type, tag, copy#, 1
     FROM rc_backup_piece
     WHERE bs_key = findValidBackupSet1P_c.bsKey
       AND decode(mask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, mask)) = TRUE#
       AND ((user_site_key = rc_backup_piece.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND
               rc_backup_piece.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND
               rc_backup_piece.device_type <> 'DISK') OR
              (this_site_key = nvl(rc_backup_piece.site_key,
                                   this_site_key)))))
       AND (findValidBackupSet1P_c.tag IS NULL OR
            findValidBackupSet1P_c.tag = tag)
       AND (findValidBackupSet1P_c.deviceType IS NULL OR
            findValidBackupSet1P_c.deviceType = device_type)
     ;

-------------------
-- Backup Pieces --
-------------------

-- Replaces the following 8.1.x cursors:
--   bsq1, bsq2
-- NOTE: no indexes are used, so this cursor is likely to require a full
-- table scan of the bp table.
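-- Editorial note (illustrative sketch, not part of the original package):
-- the status filter repeated throughout the backup piece cursors below,
--   decode(statusMask, BSavailable,
--          decode(status, 'A', TRUE#, FALSE#),
--          isStatusMatch(status, statusMask)) = TRUE#
-- special-cases the common BSavailable mask so the per-row test is a plain
-- status = 'A' comparison, and only falls back to the isStatusMatch()
-- PL/SQL function call for composite masks. Equivalent row-level logic,
-- with a hypothetical local variable "match":
--
--   IF statusMask = BSavailable THEN
--     match := (status = 'A');                            -- fast path
--   ELSE
--     match := (isStatusMatch(status, statusMask) = TRUE#);
--   END IF;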
CURSOR findBackupPiece_c( tag IN varchar2 DEFAULT NULL ,handle IN varchar2 DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,copyNumber IN number DEFAULT NULL ,statusMask IN binary_integer) RETURN bpRec_t IS SELECT bp_recid, bp_stamp, bp_key, bp.bs_key, set_stamp, set_count, piece#, copy#, bp.status, bp.completion_time, handle, tag, device_type, media, bytes, compressed, bs.site_key FROM bp, bs WHERE bp.db_key = this_db_key -- belongs to this db AND bs.db_key = this_db_key -- belongs to this db AND bp.bs_key = bs.bs_key -- join bp and bs AND (findBackupPiece_c.tag IS NULL OR tag = findBackupPiece_c.tag) AND (findBackupPiece_c.handle IS NULL OR handle = findBackupPiece_c.handle) AND (findBackupPiece_c.deviceType IS NULL OR device_type = findBackupPiece_c.deviceType) AND (findBackupPiece_c.copyNumber IS NULL OR copy# = findBackupPiece_c.copyNumber) AND decode(statusMask, BSavailable, decode(bp.status, 'A', TRUE#, FALSE#), isStatusMatch(bp.status, statusMask)) = TRUE# AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) ORDER BY piece#, copy# desc, bp_stamp desc; CURSOR findBackupPieceBpKey( bpKey IN number ,tag IN varchar2 DEFAULT NULL ,handle IN varchar2 DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,copyNumber IN number DEFAULT NULL ,statusMask IN binary_integer) RETURN bpRec_t IS SELECT bp_recid, bp_stamp, bp_key, bp.bs_key, set_stamp, set_count, piece#, copy#, bp.status, bp.completion_time, handle, tag, device_type, media, bytes, compressed, bs.site_key FROM bp, bs WHERE bp.db_key = this_db_key AND bs.db_key = this_db_key AND bp.bs_key = bs.bs_key AND (bp_key = findBackupPieceBpKey.bpkey) AND (findBackupPieceBpKey.tag IS NULL OR tag = findBackupPieceBpKey.tag) AND (findBackupPieceBpKey.handle IS NULL OR handle = findBackupPieceBpKey.handle) AND (findBackupPieceBpKey.deviceType IS NULL OR device_type = findBackupPieceBpKey.deviceType) AND (findBackupPieceBpKey.copyNumber IS NULL OR copy# = findBackupPieceBpKey.copyNumber) AND decode(statusMask, BSavailable, decode(bp.status, 'A', TRUE#, FALSE#), isStatusMatch(bp.status, statusMask)) = TRUE# AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) ORDER BY piece#, copy# desc, bp_stamp desc; CURSOR findBackupPieceBsKey1( bsKey IN number ,tag IN varchar2 DEFAULT NULL ,handle IN varchar2 DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,copyNumber IN number DEFAULT NULL ,statusMask IN binary_integer) RETURN bpRec_t IS SELECT bp_recid, bp_stamp, bp_key, bp.bs_key, set_stamp, set_count, piece#, copy#, bp.status, bp.completion_time, handle, tag, device_type, media, bytes, compressed, bs.site_key FROM bp, bs WHERE bp.db_key = this_db_key AND bs.db_key = this_db_key AND bp.bs_key = bs.bs_key AND (bs.bs_key = findBackupPieceBsKey1.bsKey) AND (findBackupPieceBsKey1.tag IS NULL OR tag = findBackupPieceBsKey1.tag) AND (findBackupPieceBsKey1.handle IS NULL OR handle = findBackupPieceBsKey1.handle) AND (findBackupPieceBsKey1.deviceType IS NULL OR device_type = findBackupPieceBsKey1.deviceType) AND (findBackupPieceBsKey1.copyNumber IS NULL OR copy# = findBackupPieceBsKey1.copyNumber) AND decode(statusMask, BSavailable, decode(bp.status, 'A', TRUE#, FALSE#), isStatusMatch(bp.status, 
                  statusMask)) = TRUE#
       AND ((user_site_key = bp.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
              (this_site_key = nvl(bp.site_key, this_site_key)))))
     ORDER BY piece#, copy# desc, bp_stamp desc;

CURSOR findBackupPieceBsKey2(
      startBsKey IN number
     ,tag        IN varchar2
     ,statusMask IN binary_integer)
     RETURN bpRec_t IS
     SELECT bp_recid, bp_stamp, bp_key, bp.bs_key, set_stamp, set_count,
            piece#, copy#, bp.status, bp.completion_time, handle, tag,
            device_type, media, bytes, compressed, bs.site_key
     FROM bp, bs
     WHERE bp.db_key = this_db_key
       AND bs.db_key = this_db_key
       AND bp.bs_key = bs.bs_key
       AND (bs.bs_key >= startBsKey)
       AND (findBackupPieceBsKey2.tag IS NULL OR
            bp.tag = findBackupPieceBsKey2.tag)
       AND decode(statusMask, BSavailable,
                  decode(bp.status, 'A', TRUE#, FALSE#),
                  isStatusMatch(bp.status, statusMask)) = TRUE#
       AND ((user_site_key = bp.site_key) OR
            (user_site_key IS NULL AND
             ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR
              (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
              (this_site_key = nvl(bp.site_key, this_site_key)))))
     ORDER BY bp.bs_key, device_type, piece#, copy# desc, bp_stamp desc;

----------------------
-- Name Translation --
----------------------

TYPE noRows_t IS RECORD
(
  error number,        -- error number
  msg   varchar2(100)  -- error msg
);

-- The tablespace_list is used to hold the list of tablespace names to
-- skip when the SKIP TABLESPACE clause is specified.

TYPE tablespaceList_t is table of rc_tablespace.name%TYPE
     index by binary_integer;

skipTablespaceList  tablespaceList_t;
skipTablespaceCount number;     -- number of tablespaces in list

--------------------------
-- Datafile Translation --
--------------------------

getDatafileCursor varchar2(30); -- pointer to current cursor
getDatafileNoRows noRows_t;     -- Set by function that opens cursor
getDatafileLast   dfRec_t;      -- The last row returned

-- Translate the database within an SCN range. The datafiles are not part of
-- the database until the dictionary transaction commits.
-- Therefore the creation scn must be less than toSCN.
-- ### offline drop makes the above statement dangerous, so allow <=
-- Note that fromSCN and toSCN must not be null.
-- Note that inBackup field has value 1 if included_in_database_backup is 'YES'
-- We don't want to translate the database where the fromSCN..toSCN range
-- spans a resetlogs operation, hence the (canApplyAnyRedo = TRUE#) condition
-- is not used here.
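-- Editorial note (illustrative sketch, not part of the original package):
-- the translation predicate below keeps a file when it was created (or
-- plugged in) at or before toSCN and was not dropped at or before fromSCN.
-- Worked example with hypothetical SCNs, fromSCN = 1000 and toSCN = 2000:
--
--   creation_change# = 900,  drop_change# = NULL => 900 <= 2000 and never
--                                                   dropped: translated
--   creation_change# = 900,  drop_change# = 950  => dropped before the
--                                                   range: not translated
--   creation_change# = 2500, drop_change# = NULL => created after toSCN:
--                                                   not translated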
CURSOR translateDatabase_c(
      fromSCN number,
      toSCN   number)
     RETURN dfRec_t IS
     SELECT rc_datafile.file#, rc_datafile.creation_change#, creation_time,
            name, tablespace_name, ts#, null, blocks, block_size,
            bytes / 1024, null, stop_change#, read_only, rfile#,
            decode(included_in_database_backup, 'YES', 1, 0), aux_name,
            rc_datafile.dbinc_key, offr.offline_scn, offr.online_scn,
            offr.online_time,
            decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt,
                                 -- encrypt value 1=ON, 2=OFF, 3=CLEAR
            rc_datafile.foreign_dbid,
            decode(rc_datafile.plugged_readonly, 'YES', 1, 0),
            rc_datafile.plugin_change#,
            rc_datafile.plugin_resetlogs_change#,
            rc_datafile.plugin_resetlogs_time,
            to_number(null) newDfCreationSCN,
            creation_thread, creation_size
     FROM rc_datafile, offr
     WHERE db_key = this_db_key AND             -- belongs to this database
           rc_datafile.dbinc_key = this_dbinc_key AND
           offr.file#(+) = rc_datafile.file# AND  -- outer join with offr
           offr.create_scn(+) = creation_change# AND
           offr.dbinc_key(+) = this_dbinc_key AND
           offr.offr_stamp(+) = 0 AND     -- only offline ranges from kccfe
           decode(rc_datafile.plugin_change#, 0,
                  rc_datafile.creation_change#,
                  rc_datafile.plugin_change#) <= toSCN AND
           (drop_change# is null OR drop_change# > fromSCN) AND
           (canHandleTransportableTbs = TRUE# OR
            rc_datafile.plugged_readonly = 'NO') AND
           -- if user is interested in a specific site, return that site's
           -- files, else return the file names for translation_site_key.
           (nvl(realf_site_key, translation_site_key) = site_key)
     ORDER BY rc_datafile.file#;

-- Translate a tablespace name (always relative to current time)
-- Note: inBackup field has value 1 if included_in_database_backup is 'YES'

CURSOR translateTablespace_c(
      tsName varchar2)
     RETURN dfRec_t IS
     SELECT file#, creation_change#, creation_time, name, tablespace_name,
            ts#, null, blocks, block_size, bytes / 1024, null, stop_change#,
            read_only, rfile#,
            decode(included_in_database_backup, 'YES', 1, 0), aux_name,
            dbinc_key, NULL, NULL, NULL,
            decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt,
                                 -- encrypt value 1=ON, 2=OFF, 3=CLEAR
            rc_datafile.foreign_dbid,
            decode(rc_datafile.plugged_readonly, 'YES', 1, 0),
            rc_datafile.plugin_change#,
            rc_datafile.plugin_resetlogs_change#,
            rc_datafile.plugin_resetlogs_time,
            to_number(null) newDfCreationSCN,
            creation_thread, creation_size
     FROM rc_datafile
     WHERE db_key = this_db_key AND             -- part of this db
           tablespace_name = translateTablespace_c.tsName AND
           dbinc_key = this_dbinc_key AND
           ((untilSCN is null AND drop_change# is null) OR
            ((decode(plugin_change#, 0, creation_change#,
                     plugin_change#) <= untilSCN) AND
             (drop_change# is null or drop_change# > untilSCN))) AND
           (nvl(realf_site_key, translation_site_key) = site_key) AND
           (canHandleTransportableTbs = TRUE# OR
            rc_datafile.plugged_readonly = 'NO')
     ORDER BY file#;

-- Translate a datafile name (always relative to current time)
-- Note: inBackup field has value 1 if included_in_database_backup is 'YES'

CURSOR translateDatafileName(
      fileName varchar2)
     RETURN dfRec_t IS
     SELECT file#, creation_change#, creation_time, name, tablespace_name,
            ts#, null, blocks, block_size, bytes / 1024, null, stop_change#,
            read_only, rfile#,
            decode(included_in_database_backup, 'YES', 1, 0), aux_name,
            dbinc_key, NULL, NULL, NULL,
            decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt,
                                 -- encrypt value 1=ON, 2=OFF, 3=CLEAR
            rc_datafile.foreign_dbid,
            decode(rc_datafile.plugged_readonly, 'YES', 1, 0),
            rc_datafile.plugin_change#,
            rc_datafile.plugin_resetlogs_change#,
            rc_datafile.plugin_resetlogs_time,
            to_number(null) newDfCreationSCN,
            creation_thread, creation_size
FROM rc_datafile WHERE db_key = this_db_key AND -- belongs to this database name = translateDatafilename.fileName AND -- filename matches dbinc_key = this_dbinc_key AND drop_change# is null AND -- filename currently part of db (untilSCN is null OR decode(plugin_change#, 0, creation_change#, plugin_change#) < untilSCN) AND -- following "and" clause to detect ambiguous names ((untilSCN is null) OR -- no until clause ((untilTime is not null) AND NOT EXISTS (SELECT 1 FROM rc_datafile WHERE dbinc_key = this_dbinc_key AND name = translateDatafilename.fileName AND (plugin_change# != 0 OR nvl(creation_time, MINDATEVAL) < untilTime) AND drop_time > untilTime AND (nvl(realf_site_key, translation_site_key) = site_key))) OR ((untilSCN is not null) AND NOT EXISTS (SELECT 1 FROM rc_datafile WHERE dbinc_key = this_dbinc_key AND name = translateDatafilename.fileName AND decode(plugin_change#, 0, creation_change#, plugin_change#) < untilSCN AND drop_change# > untilSCN AND (nvl(realf_site_key, translation_site_key)=site_key)))) AND (canHandleTransportableTbs = TRUE# OR plugged_readonly = 'NO'); -- Translate a datafile number -- Note: inBackup field has value 1 if included_in_database_backup is 'YES' CURSOR translateDatafileNumber( fno number) RETURN dfRec_t IS SELECT file#, creation_change#, creation_time, name, tablespace_name, ts#, null, blocks, block_size, bytes / 1024, null, stop_change#, read_only, rfile#, decode(included_in_database_backup, 'YES', 1, 0), aux_name, dbinc_key, NULL, NULL, NULL, decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt, -- encrypt value 1=ON, 2=OFF, 3=CLEAR rc_datafile.foreign_dbid, decode(rc_datafile.plugged_readonly, 'YES', 1, 0), rc_datafile.plugin_change#, rc_datafile.plugin_resetlogs_change#, rc_datafile.plugin_resetlogs_time, to_number(null) newDfCreationSCN, creation_thread, creation_size FROM rc_datafile WHERE db_key = this_db_key AND -- belongs to this database file# = translateDataFileNumber.fno AND -- filenumber matches dbinc_key = this_dbinc_key AND ((untilSCN is null AND drop_change# is null) OR ((nvl(creation_time, MINDATEVAL) < untilTime OR decode(plugin_change#, 0, creation_change#, plugin_change#) < untilSCN) AND (drop_time > untilTime OR drop_change# > untilSCN OR drop_change# is null))) AND (nvl(realf_site_key, translation_site_key) = site_key) AND (canHandleTransportableTbs = TRUE# OR plugged_readonly = 'NO'); -- Translate a datafile number and checkpoint SCN -- Note: inBackup field has value 1 if included_in_database_backup is 'YES' CURSOR translateDatafileCheckpoint( fno number ,ckpSCN number) RETURN dfRec_t IS SELECT file#, creation_change#, creation_time, name, tablespace_name, ts#, null, blocks, block_size, bytes / 1024, null, stop_change#, read_only, rfile#, decode(included_in_database_backup, 'YES', 1, 0), aux_name, dbinc_key, NULL, NULL, NULL, decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt, -- encrypt value 1=ON, 2=OFF, 3=CLEAR rc_datafile.foreign_dbid, decode(rc_datafile.plugged_readonly, 'YES', 1, 0), rc_datafile.plugin_change#, rc_datafile.plugin_resetlogs_change#, rc_datafile.plugin_resetlogs_time, to_number(null) newDfCreationSCN, creation_thread, creation_size FROM rc_datafile WHERE db_key = this_db_key -- belongs to this database AND file# = translateDatafileCheckpoint.fno -- filenumber matches AND dbinc_key = this_dbinc_key AND translateDatafileCheckpoint.ckpSCN >= decode(plugin_change#, 0, creation_change#, plugin_change#) AND (drop_change# IS NULL OR translateDatafileCheckpoint.ckpSCN < drop_change#) AND 
           (canHandleTransportableTbs = TRUE# OR plugged_readonly = 'NO') AND
           (nvl(realf_site_key, translation_site_key) = site_key);

-- Translate all datafiles that ever existed
-- Note that we return nulls for most fields here. This is because
-- the controlfile version of this cursor has to return nulls for them
-- because it doesn't have the information, and so we return nulls here
-- so that things are consistent.
-- We assume that if a datafile was added and backed up, then a resync
-- was done before the datafile was dropped. RMAN could not have created
-- the backup otherwise.
--
-- Note that function getDataFile returns only the first occurrence
-- in case of duplicated entries. So, it is important that
-- translateAllDatafile cursor returns the current incarnation files first
-- before considering older incarnation files. Otherwise, we may end up
-- listing incorrect filenames (see bug-2336178).
--
-- Note: inBackup field has value 1 if included_in_database_backup is 'YES'

CURSOR translateAllDatafile_c RETURN dfRec_t IS
     SELECT DISTINCT file# dfNumber, creation_change# dfCreationSCN,
            creation_time dfCreationTime, name fileName,
            tablespace_name tsName, ts# tsNumber, to_char(null) status,
            blocks blocks, block_size blockSize, bytes / 1024 kbytes,
            to_number(null) unrecovSCN, stop_change# stopSCN,
            FALSE# readOnly, rfile# rfNumber,
            decode(included_in_database_backup, 'YES', 1, 0) inBackup,
            aux_name auxName, dbinc_key dbincKey,
            NULL dfOfflineSCN, NULL dfOnlineSCN, NULL dfOnlineTime,
            decode(encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt,
                                 -- encrypt value 1=ON, 2=OFF, 3=CLEAR
            rc_datafile.foreign_dbid,
            decode(rc_datafile.plugged_readonly, 'YES', 1, 0),
            rc_datafile.plugin_change#,
            rc_datafile.plugin_resetlogs_change#,
            rc_datafile.plugin_resetlogs_time,
            decode(rc_datafile.plugin_change#, 0,
                   rc_datafile.creation_change#,
                   rc_datafile.plugin_change#) newDfCreationSCN,
            creation_thread, creation_size
     FROM rc_datafile
     WHERE db_key = this_db_key
       AND (canHandleTransportableTbs = TRUE# OR plugged_readonly = 'NO')
       AND (nvl(realf_site_key, translation_site_key) = site_key)
     ORDER BY file#, decode(dbinc_key, this_dbinc_key, 0, 1),
              newDfCreationSCN desc;

-- Translate all datafiles in rc_database_block_corruption
-- Note: inBackup field has value 1 if included_in_database_backup is 'YES'

CURSOR translateCorruptList_c RETURN dfRec_t IS
     SELECT DISTINCT rcd.file#, rcd.creation_change#, rcd.creation_time,
            rcd.name, rcd.tablespace_name, rcd.ts#, null, rcd.blocks,
            rcd.block_size, rcd.bytes / 1024, null, rcd.stop_change#,
            rcd.read_only, rcd.rfile#,
            decode(rcd.included_in_database_backup, 'YES', 1, 0), aux_name,
            rcd.dbinc_key, NULL, NULL, NULL,
            decode(rcd.encrypt_in_backup, 'ON', 1, 'OFF',2, 3) encrypt,
                                 -- encrypt value 1=ON, 2=OFF, 3=CLEAR
            rcd.foreign_dbid,
            decode(rcd.plugged_readonly, 'YES', 1, 0),
            rcd.plugin_change#,
            rcd.plugin_resetlogs_change#,
            rcd.plugin_resetlogs_time,
            to_number(null) newDfCreationSCN,
            rcd.creation_thread, rcd.creation_size
     FROM rc_datafile rcd,
          (select distinct file# from rc_database_block_corruption
           where dbinc_key = this_dbinc_key
             and corruption_type != 'NOLOGGING') bc
     WHERE rcd.db_key = this_db_key AND      -- belongs to this database
           rcd.file# = bc.file# AND          -- filenumber matches
           rcd.dbinc_key = this_dbinc_key AND
           (canHandleTransportableTbs = TRUE# OR
            rcd.plugged_readonly = 'NO') AND
           ((untilSCN is null AND rcd.drop_change# is null) OR
            ((nvl(rcd.creation_time, MINDATEVAL) < untilTime OR
              decode(rcd.plugin_change#, 0, rcd.creation_change#,
                     rcd.plugin_change#) < untilSCN) AND
             (rcd.drop_time > untilTime
              OR rcd.drop_change# > untilSCN
              OR rcd.drop_change# is null))) AND
           (nvl(realf_site_key, translation_site_key) = site_key)
     ORDER BY rcd.file#;  -- do not change this as krmkcortr is
                          -- dependent on this

--------------------------
-- Tempfile Translation --
--------------------------

getTempfileCursor varchar2(30); -- pointer to current cursor

-- Translate tempfiles
-- When untilSCN is specified, then get all the tempfiles that exist now
-- on tablespaces that existed at untilTime. In other words, untilSCN applies
-- only to tablespace and **not** to tempfiles.

CURSOR translateTempfile_c RETURN tfRec_t IS
     SELECT file# tfNumber, creation_change# tfCreationSCN,
            creation_time tfCreationTime, name fileName,
            tablespace_name tsName, ts# tsNumber,
            decode(autoextend, 'ON', 16, 0) status, bigfile isSFT,
            blocks blocks, block_size blockSize, maxsize maxSize,
            nextsize nextSize, rfile# rfNumber, dbinc_key dbincKey
     FROM rc_tempfile
     WHERE dbinc_key = this_dbinc_key -- belongs to this incarnation
       AND drop_change# is NULL       -- tempfile exists now
       AND (untilSCN is NULL OR
            ((tablespace_creation_change# < untilSCN OR
              nvl(tablespace_creation_time, MINDATEVAL) < untilTime) AND
             tablespace_drop_change# IS NULL))
       AND (nvl(realf_site_key, translation_site_key) = site_key)
       AND name is not NULL
     ORDER BY file#;

CURSOR translateTempfileName_c(fileName IN varchar2) RETURN tfRec_t IS
     SELECT file# tfNumber, creation_change# tfCreationSCN,
            creation_time tfCreationTime, name fileName,
            tablespace_name tsName, ts# tsNumber,
            decode(autoextend, 'ON', 16, 0) status, bigfile isSFT,
            blocks blocks, block_size blockSize, maxsize maxSize,
            nextsize nextSize, rfile# rfNumber, dbinc_key dbincKey
     FROM rc_tempfile
     WHERE dbinc_key = this_dbinc_key -- belongs to this incarnation
       AND drop_change# is NULL       -- tempfile exists now
       AND (untilSCN is NULL OR
            ((tablespace_creation_change# < untilSCN OR
              nvl(tablespace_creation_time, MINDATEVAL) < untilTime) AND
             tablespace_drop_change# IS NULL))
       AND name = translateTempfileName_c.fileName -- filename matches
       AND (nvl(realf_site_key, translation_site_key) = site_key)
       AND name is not NULL
     ORDER BY file#;

CURSOR translateTempfileNumber_c(fno IN number) RETURN tfRec_t IS
     SELECT file# tfNumber, creation_change# tfCreationSCN,
            creation_time tfCreationTime, name fileName,
            tablespace_name tsName, ts# tsNumber,
            decode(autoextend, 'ON', 16, 0) status, bigfile isSFT,
            blocks blocks, block_size blockSize, maxsize maxSize,
            nextsize nextSize, rfile# rfNumber, dbinc_key dbincKey
     FROM rc_tempfile
     WHERE dbinc_key = this_dbinc_key -- belongs to this incarnation
       AND drop_change# is NULL       -- tempfile exists now
       AND (untilSCN is NULL OR
            ((tablespace_creation_change# < untilSCN OR
              nvl(tablespace_creation_time, MINDATEVAL) < untilTime) AND
             tablespace_drop_change# IS NULL))
       AND file# = translateTempfileNumber_c.fno -- filenumber matches
       AND (nvl(realf_site_key, translation_site_key) = site_key)
       AND name is not NULL
     ORDER BY file#;

----------------------------
-- Online Log Translation --
----------------------------

CURSOR translateOnlineLogs_c(srls IN number) IS
     SELECT thread#, group#, name
     FROM rc_redo_log
     WHERE dbinc_key = this_dbinc_key
       AND (nvl(realf_site_key, translation_site_key) = site_key)
       AND ((type = 'ONLINE' AND srls = 0) OR
            (type = 'STANDBY' AND srls = 1))
     ORDER BY thread#, group#, name;

------------------------------
-- Archived Log Translation --
------------------------------

getArchivedLogNoRows     noRows_t;
getArchivedLogDuplicates number;  -- Duplicate filtering flag
getArchivedLogLast       alRec_t; -- used for duplicate filtering
getArchivedLogCursor        varchar2(40);
getArchivedLogDoingRecovery number;     -- for filtering orphan logs
getArchivedLogOnlyrdf       number := 0;
getrcvRecLast               rcvRec_t;

CURSOR translateArcLogKey(
      alKey IN number)
     RETURN alRec_t IS
     SELECT al_key, recid, stamp, thread#, sequence#, name, first_change#,
            first_time, next_change#, next_time, resetlogs_change#,
            resetlogs_time, blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND archived = 'YES'
       AND al_key = translateArcLogKey.alKey;

CURSOR translateArcLogName(
      fname      IN varchar2
     ,statusMask IN binary_integer
     ,online     IN number -- IGNORED!
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid, stamp, thread#, sequence#, name, first_change#,
            first_time, next_change#, next_time, resetlogs_change#,
            resetlogs_time, blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND name = translateArcLogName.fname
       AND decode(statusMask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, statusMask)) = TRUE#
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = nvl(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby is NULL OR
              nvl(is_standby, 'NO') = decode(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
     ORDER BY is_recovery_dest_file desc, stamp desc;

CURSOR translateArcLogSeqRange(
      thread#    IN number
     ,incarn     IN number
     ,fromseq#   IN number
     ,toseq#     IN number
     ,pattern    IN varchar2
     ,statusMask IN binary_integer
     ,online     IN number -- IGNORED!
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid, stamp, thread#, sequence#, name, first_change#,
            first_time, next_change#, next_time, resetlogs_change#,
            resetlogs_time, blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND dbinc_key = DECODE(translateArcLogSeqRange.incarn,
                              -1, this_dbinc_key,
                              0, dbinc_key,
                              translateArcLogSeqRange.incarn)
       AND (translateArcLogSeqRange.thread# IS NULL OR
            thread# = translateArcLogSeqRange.thread#)
       AND sequence# between nvl(fromseq#, 0) and nvl(toseq#, MAXSEQVAL)
       AND (pattern is null OR name like pattern)
       AND isstatusMatch(status,statusMask) = TRUE#
       AND archived = 'YES' -- this will also filter out cleared logs
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = NVL(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby IS NULL OR
              nvl(is_standby, 'NO') = DECODE(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
     ORDER BY thread#, sequence#, terminal DESC,
              is_recovery_dest_file DESC, stamp DESC;

-- this cursor must always be called to fetch archivelogs with status deleted.
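-- Editorial note (illustrative sketch, not part of the original package):
-- the cursor below unions rc_archived_log rows with redo that is known
-- only from log backups (brl) and proxy backups (xal). The backup-only
-- branches return a stamp of -1, so under the trailing "stamp DESC" sort
-- key a real archived log record for a given thread#/sequence# always
-- sorts ahead of its backup-only placeholder. Hypothetical ordering:
--
--   thread# 1, sequence# 42, stamp 701234567  -- from rc_archived_log
--   thread# 1, sequence# 42, stamp -1         -- from brl/xal, sorts last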
CURSOR translateArcLogSeqRange2(
      thread#    IN number
     ,incarn     IN number
     ,fromseq#   IN number
     ,toseq#     IN number
     ,statusMask IN binary_integer -- must at least have BSdeleted
     ,online     IN number
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid,
            DECODE(next_change#, MAXSCNVAL_NEXT_CHANGE, -2, stamp) stamp,
            thread#, sequence#, name, first_change#, first_time,
            next_change#, next_time, resetlogs_change#, resetlogs_time,
            blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND dbinc_key = DECODE(translateArcLogSeqRange2.incarn,
                              -1, this_dbinc_key,
                              0, dbinc_key,
                              translateArcLogSeqRange2.incarn)
       AND (translateArcLogSeqRange2.thread# IS NULL OR
            thread# = translateArcLogSeqRange2.thread#)
       AND sequence# between NVL(fromseq#, 0) and NVL(toseq#, MAXSEQVAL)
       AND (archived = 'YES' OR -- this will also filter out cleared logs
            (online = TRUE# and archived = 'NO' and name IS NOT NULL))
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = NVL(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby IS NULL OR
              nvl(is_standby, 'NO') = DECODE(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
       AND isstatusMatch(status,statusMask) = TRUE#
     UNION ALL
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last (desc)
            brl.thread#, brl.sequence#, TO_CHAR(NULL), brl.low_scn,
            brl.low_time, brl.next_scn, brl.next_time, dbinc.reset_scn,
            dbinc.reset_time, brl.blocks, brl.block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', brl.terminal, 0,
            0 site_key_order_col, 0 source_dbid
     FROM brl, dbinc
     WHERE brl.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND brl.dbinc_key = DECODE(translateArcLogSeqRange2.incarn,
                                  -1, this_dbinc_key,
                                  0, brl.dbinc_key,
                                  translateArcLogSeqRange2.incarn)
       AND (translateArcLogSeqRange2.thread# IS NULL OR
            brl.thread# = translateArcLogSeqRange2.thread#)
       AND brl.sequence# BETWEEN NVL(fromseq#, 0) AND NVL(toseq#, MAXSEQVAL)
       -- ignore this as client will call us with deleted status always
       -- AND isstatusMatch('D',statusMask) = TRUE#
     UNION -- to filter duplicates between brl and xal
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last (desc)
            xal.thread#, xal.sequence#, TO_CHAR(NULL), xal.low_scn,
            xal.low_time, xal.next_scn, xal.next_time, dbinc.reset_scn,
            dbinc.reset_time, xal.blocks, xal.block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', xal.terminal, xal.site_key,
            0 site_key_order_col, 0 source_dbid
     FROM xal, dbinc
     WHERE xal.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND xal.dbinc_key = DECODE(translateArcLogSeqRange2.incarn,
                                  -1, this_dbinc_key,
                                  0, xal.dbinc_key,
                                  translateArcLogSeqRange2.incarn)
       AND (translateArcLogSeqRange2.thread# IS NULL OR
            xal.thread# = translateArcLogSeqRange2.thread#)
       AND xal.sequence# BETWEEN NVL(fromseq#, 0) AND NVL(toseq#, MAXSEQVAL)
       -- ignore this as client will call us with deleted status always
       -- AND isstatusMatch('D',statusMask) = TRUE#
       AND ((user_site_key = xal.site_key) OR
            (user_site_key IS NULL AND
             ((tape_backups_shared = TRUE#) OR
              (this_site_key = NVL(xal.site_key, this_site_key)))))
     -- thread#, sequence#, terminal,
     -- is_recovery_dest_file DESC, al_stamp DESC;
     ORDER BY 4, 5, 21 DESC, 18 DESC, 3 DESC;

-- this cursor can get logs for previous incarnation

CURSOR translateArcLogTimeRange(
      thread#    IN number
     ,incarn     IN number
     ,fromTime   IN date
     ,toTime     IN date
     ,pattern    IN varchar2
     ,statusMask IN binary_integer
     ,online     IN number -- IGNORED!
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid, stamp, thread#, sequence#, name, first_change#,
            first_time, next_change#, next_time, resetlogs_change#,
            resetlogs_time, blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND (canApplyAnyRedo = TRUE# OR dbinc_key = this_dbinc_key)
       AND dbinc_key = DECODE(translateArcLogTimeRange.incarn,
                              -1, this_dbinc_key,
                              0, dbinc_key,
                              translateArcLogTimeRange.incarn)
       AND (translateArcLogTimeRange.thread# IS NULL OR
            thread# = translateArcLogTimeRange.thread#)
       AND next_time > NVL(fromTime, MINDATEVAL)
       AND first_time <= NVL(toTime, MAXDATEVAL)
       AND (pattern IS NULL OR name LIKE pattern)
       AND DECODE(statusMask, BSavailable,
                  DECODE(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, statusMask)) = TRUE#
       AND archived = 'YES' -- this will also filter out cleared logs
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = NVL(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby IS NULL OR
              nvl(is_standby, 'NO') = DECODE(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
     -- last incarnation last as recovery needs them in that order.
     ORDER BY resetlogs_change#, resetlogs_time, thread#, sequence#,
              terminal DESC, is_recovery_dest_file DESC, stamp DESC;

-- this cursor can return parent incarnation logs and
-- must always be called to fetch archivelogs with status deleted.
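-- Editorial note (illustrative sketch, not part of the original package):
-- the incarn argument of these translation cursors is interpreted through
-- one DECODE idiom,
--   dbinc_key = DECODE(incarn, -1, this_dbinc_key, -- current incarnation
--                              0, dbinc_key,       -- any incarnation
--                              incarn)             -- one given incarnation
-- so -1 restricts to the current incarnation, 0 compares the column with
-- itself (a no-op filter matching every incarnation), and any other value
-- selects that specific dbinc_key.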
CURSOR translateArcLogTimeRange2(
      thread#    IN number
     ,incarn     IN number
     ,fromTime   IN date
     ,toTime     IN date
     ,statusMask IN binary_integer -- must at least have BSdeleted
     ,online     IN number
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid,
            DECODE(next_change#, MAXSCNVAL_NEXT_CHANGE, -2, stamp) stamp,
            thread#, sequence#, name, first_change#, first_time,
            next_change#, next_time, resetlogs_change#, resetlogs_time,
            blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            DECODE(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND (canApplyAnyRedo = TRUE# OR dbinc_key = this_dbinc_key)
       AND dbinc_key = DECODE(translateArcLogTimeRange2.incarn,
                              -1, this_dbinc_key,
                              0, dbinc_key,
                              translateArcLogTimeRange2.incarn)
       AND (translateArcLogTimeRange2.thread# IS NULL OR
            thread# = translateArcLogTimeRange2.thread#)
       AND next_time > NVL(fromTime, MINDATEVAL)
       AND first_time <= NVL(toTime, MAXDATEVAL)
       AND (archived = 'YES' OR -- this will also filter out cleared logs
            (online = TRUE# AND archived = 'NO' AND name IS NOT NULL AND
             resetlogs_change# = this_reset_scn AND
             resetlogs_time = this_reset_time))
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = NVL(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby IS NULL OR
              NVL(is_standby, 'NO') = DECODE(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
       AND isstatusMatch(status,statusMask) = TRUE#
     UNION ALL
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last
            thread#, sequence#, TO_CHAR(NULL), low_scn, low_time, next_scn,
            next_time, reset_scn, reset_time, blocks, block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', terminal, 0,
            0 site_key_order_col, 0 source_dbid
     FROM brl, dbinc
     WHERE brl.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND (canApplyAnyRedo = TRUE# OR brl.dbinc_key = this_dbinc_key)
       AND dbinc.dbinc_key = DECODE(translateArcLogTimeRange2.incarn,
                                    -1, this_dbinc_key,
                                    0, dbinc.dbinc_key,
                                    translateArcLogTimeRange2.incarn)
       AND next_time > NVL(fromTime, MINDATEVAL)
       AND low_time <= NVL(toTime, MAXDATEVAL)
       AND (translateArcLogTimeRange2.thread# IS NULL OR
            thread# = translateArcLogTimeRange2.thread#)
       -- ignore this as client will call us with deleted status always
       -- AND isstatusMatch('D',statusMask) = TRUE#
     UNION -- to filter duplicates between brl and xal
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last
            thread#, sequence#, TO_CHAR(NULL), low_scn, low_time, next_scn,
            next_time, reset_scn, reset_time, blocks, block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM xal, dbinc
     WHERE xal.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND (canApplyAnyRedo = TRUE# OR xal.dbinc_key = this_dbinc_key)
       AND dbinc.dbinc_key = DECODE(translateArcLogTimeRange2.incarn,
                                    -1, this_dbinc_key,
                                    0, dbinc.dbinc_key,
                                    translateArcLogTimeRange2.incarn)
       AND next_time > NVL(fromTime, MINDATEVAL)
       AND low_time <= NVL(toTime, MAXDATEVAL)
       AND (translateArcLogTimeRange2.thread# IS NULL OR
            thread# = translateArcLogTimeRange2.thread#)
     -- latest incarnation last, as recovery needs them in that order.
-- reset_scn, reset_time, thread#, sequence#, terminal -- is_recovery_dest_file desc, al_stamp desc; -- ignore this as client will call us with deleted status always -- AND isstatusMatch('D',statusMask) = TRUE# AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = NVL(xal.site_key, this_site_key))))) ORDER BY 11, 12, 4, 5, 21 DESC, 18 DESC, 3 DESC; -- This is the only cursor that respects the online parameter. -- It can return previous incarnation logs CURSOR translateArcLogSCNRange( thread# IN number ,incarn IN number ,sequence# IN number ,fromSCN IN number ,toSCN IN number ,pattern IN varchar2 ,statusMask IN binary_integer ,online IN number ,needstby IN number DEFAULT NULL ,reset_scn IN number ,reset_time IN date) RETURN alRec_t IS SELECT al_key, recid, DECODE(next_change#, MAXSCNVAL_NEXT_CHANGE, -2, stamp) stamp, thread#, sequence#, name, first_change#, first_time, next_change#, next_time, resetlogs_change#, resetlogs_time, blocks, block_size, status, completion_time, 0, is_recovery_dest_file, compressed, DECODE(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key, 0 site_key_order_col, 0 source_dbid FROM rc_archived_log WHERE db_key = this_db_key AND ((canApplyAnyRedo = TRUE# AND (translateArcLogSCNRange.reset_scn IS NULL OR (translateArcLogSCNRange.reset_scn = resetlogs_change# AND translateArcLogSCNRange.reset_time = resetlogs_time))) OR (dbinc_key = this_dbinc_key)) AND dbinc_key = DECODE (translateArcLogSCNRange.incarn, -1, this_dbinc_key, 0, dbinc_key, translateArcLogSCNRange.incarn) AND (translateArcLogSCNRange.thread# IS NULL OR thread# = translateArcLogSCNRange.thread#) AND (translateArcLogSCNRange.sequence# IS NULL OR sequence# = translateArcLogSCNRange.sequence#) AND next_change# > NVL(fromSCN, 0) AND first_change# < NVL(toSCN, MAXSCNVAL) AND (pattern IS NULL OR name LIKE pattern) AND decode(statusMask, BSavailable, DECODE(status, 'A', TRUE#, FALSE#), isStatusMatch(status, statusMask)) = TRUE# AND (archived = 'YES' OR -- this will also filter out cleared logs (online = TRUE# and archived = 'NO' and name IS NOT NULL and resetlogs_change# = this_reset_scn and resetlogs_time = this_reset_time)) AND ((client_site_aware = TRUE# AND ((user_site_key = site_key) OR -- interested in specific site (user_site_key IS NULL AND ((logs_shared = TRUE#) OR (this_site_key = NVL(site_key, this_site_key)))))) OR (client_site_aware = FALSE# AND (needstby IS NULL OR NVL(is_standby, 'NO') = DECODE(needstby, TRUE#, 'YES', 'NO') OR (terminal = 'YES') OR (first_change# >= lbacked_al_next_scn AND first_change# <= standby_became_primary_scn)))) -- last incarnation last, as recovery needs them in that order. ORDER BY resetlogs_change#, resetlogs_time, thread#, sequence#, terminal DESC, is_recovery_dest_file DESC, stamp DESC; -- This cursor is used by krmkdmr(). krmkdmr() uses -- this to get all archivelogs in the SCN range (x..infinity) where x -- is the SCN where media recovery first requests a log. This is the only -- cursor that respects the online parameter. Note that if we have -- an inspected current online log in the list, then either that log has never -- been archived, or if it has been archived, then the archived record has -- a higher stamp, so the order by stamp desc means the archived copy -- is returned first. -- We union the al table with the brl table because -- an archivedlog in the SCN range may have had its al record deleted or -- uncataloged, but we still have a backup of that log. 
We could instead/also
-- union with rc_log_history, but if that gives us a log we wouldn't otherwise
-- see, then we will fail anyway since there is no copy or backup of that log.
-- As it is, we will give a meaningful error message if recovery asks for a log
-- we don't have, so there is no need to look at the log_history.
-- NOTE!!! NOTE!!! NOTE!!!
-- needstby flag is not used here, because media recovery should get all
-- logs to inspect, as it can use a file if it is accessible on disk.
--
-- This must always be called to fetch archivelogs with status deleted.

CURSOR translateArcLogSCNRange2(
      thread#    IN number
     ,incarn     IN number
     ,sequence#  IN number
     ,fromSCN    IN number
     ,toSCN      IN number
     ,toTime     IN date
     ,statusMask IN binary_integer -- must at least have BSdeleted
     ,online     IN number
     ,needstby   IN number DEFAULT NULL -- IGNORED
     ,reset_scn  IN number
     ,reset_time IN date)
     RETURN alRec_t IS
     SELECT al_key, recid,
            DECODE(next_change#, MAXSCNVAL_NEXT_CHANGE, -2, stamp) stamp,
            thread#, sequence#, name, first_change#, first_time,
            next_change#, next_time, resetlogs_change#, resetlogs_time,
            blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            DECODE(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            DECODE(next_change#, MAXSCNVAL_NEXT_CHANGE, -1,
                   DECODE(site_key, this_site_key, 1, 0))
              site_key_order_col,
            0 source_dbid
     FROM rc_archived_log
     WHERE db_key = this_db_key
       AND ((canApplyAnyRedo = TRUE# AND
             (translateArcLogSCNRange2.reset_scn IS NULL OR
              (translateArcLogSCNRange2.reset_scn = resetlogs_change# AND
               translateArcLogSCNRange2.reset_time = resetlogs_time))) OR
            (dbinc_key = this_dbinc_key))
       AND dbinc_key = DECODE(translateArcLogSCNRange2.incarn,
                              -1, this_dbinc_key,
                              0, dbinc_key,
                              translateArcLogSCNRange2.incarn)
       AND (translateArcLogSCNRange2.thread# IS NULL OR
            thread# = translateArcLogSCNRange2.thread#)
       AND (translateArcLogSCNRange2.sequence# IS NULL OR
            sequence# = translateArcLogSCNRange2.sequence#)
       AND next_change# > NVL(fromSCN, 0)
       AND first_change# < NVL(toSCN, MAXSCNVAL)
       AND (toTime IS NULL OR first_time < toTime)
       AND (archived = 'YES' OR -- this will also filter out cleared logs
            (online = TRUE# and archived = 'NO' and name IS NOT NULL and
             resetlogs_change# = this_reset_scn and
             resetlogs_time = this_reset_time))
       AND isstatusMatch(status,statusMask) = TRUE#
     UNION ALL
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last
            brl.thread#, brl.sequence#, TO_CHAR(NULL), brl.low_scn,
            brl.low_time, brl.next_scn, brl.next_time, dbinc.reset_scn,
            dbinc.reset_time, brl.blocks, brl.block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', brl.terminal, 0,
            0 site_key_order_col, 0 source_dbid
     FROM brl, dbinc
     WHERE brl.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND ((canApplyAnyRedo = TRUE# AND
             (translateArcLogSCNRange2.reset_scn IS NULL OR
              (translateArcLogSCNRange2.reset_scn = dbinc.reset_scn AND
               translateArcLogSCNRange2.reset_time = dbinc.reset_time))) OR
            (dbinc.dbinc_key = this_dbinc_key))
       AND brl.dbinc_key = DECODE(translateArcLogSCNRange2.incarn,
                                  -1, this_dbinc_key,
                                  0, brl.dbinc_key,
                                  translateArcLogSCNRange2.incarn)
       AND (translateArcLogSCNRange2.thread# IS NULL OR
            thread# = translateArcLogSCNRange2.thread#)
       AND (translateArcLogSCNRange2.sequence# IS NULL OR
            sequence# = translateArcLogSCNRange2.sequence#)
       AND next_scn > NVL(fromSCN, 0)
       AND low_scn < NVL(toSCN, MAXSCNVAL)
       AND (toTime IS NULL OR low_time < toTime)
       -- ignore this as client will call us with deleted status always
       -- AND isstatusMatch('D',statusMask) = TRUE#
     UNION -- to filter duplicates between brl and xal
     SELECT DISTINCT -- to filter duplicates
            TO_NUMBER(NULL), TO_NUMBER(NULL), -1, -- to sort last
            xal.thread#, xal.sequence#, TO_CHAR(NULL), xal.low_scn,
            xal.low_time, xal.next_scn, xal.next_time, dbinc.reset_scn,
            dbinc.reset_time, xal.blocks, xal.block_size, 'D',
            TO_DATE(NULL), 0, 'NO', 'NO', 'N', xal.terminal, xal.site_key,
            0 site_key_order_col, 0 source_dbid
     FROM xal, dbinc
     WHERE xal.dbinc_key = dbinc.dbinc_key -- join condition
       AND dbinc.db_key = this_db_key
       AND ((canApplyAnyRedo = TRUE# AND
             (translateArcLogSCNRange2.reset_scn IS NULL OR
              (translateArcLogSCNRange2.reset_scn = dbinc.reset_scn AND
               translateArcLogSCNRange2.reset_time = dbinc.reset_time))) OR
            (dbinc.dbinc_key = this_dbinc_key))
       AND xal.dbinc_key = DECODE(translateArcLogSCNRange2.incarn,
                                  -1, this_dbinc_key,
                                  0, xal.dbinc_key,
                                  translateArcLogSCNRange2.incarn)
       AND (translateArcLogSCNRange2.thread# IS NULL OR
            thread# = translateArcLogSCNRange2.thread#)
       AND (translateArcLogSCNRange2.sequence# IS NULL OR
            sequence# = translateArcLogSCNRange2.sequence#)
       AND next_scn > NVL(fromSCN, 0)
       AND low_scn < NVL(toSCN, MAXSCNVAL)
       AND (toTime IS NULL OR low_time < toTime)
       -- ignore this as client will call us with deleted status always
       -- AND isstatusMatch('D',statusMask) = TRUE#
       AND ((user_site_key = xal.site_key) OR
            (user_site_key IS NULL AND
             ((tape_backups_shared = TRUE#) OR
              (this_site_key = NVL(xal.site_key, this_site_key)))))
     -- latest incarnation last as recovery needs them in that order
     -- reset_scn , reset_time, thread#, sequence#, terminal desc,
     -- site_key_order_col desc, is_recovery_dest_file desc, al_stamp desc;
     ORDER BY 11, 12, 4, 5, 21 DESC, 23 DESC, 18 DESC, 3 DESC;

CURSOR translateArcLogPattern(
      pattern    IN varchar2
     ,statusMask IN binary_integer
     ,online     IN number -- IGNORED!
     ,needstby   IN number DEFAULT NULL)
     RETURN alRec_t IS
     SELECT al_key, recid, stamp, thread#, sequence#, name, first_change#,
            first_time, next_change#, next_time, resetlogs_change#,
            resetlogs_time, blocks, block_size, status, completion_time, 0,
            is_recovery_dest_file, compressed,
            decode(is_standby, 'YES', 'Y', 'N') stby, terminal, site_key,
            0 site_key_order_col, 0 source_dbid
     FROM rc_archived_log
     WHERE (canApplyAnyRedo = TRUE# OR dbinc_key = this_dbinc_key)
       AND db_key = this_db_key
       AND (pattern is null or name like pattern)
       AND decode(statusMask, BSavailable,
                  decode(status, 'A', TRUE#, FALSE#),
                  isStatusMatch(status, statusMask)) = TRUE#
       AND archived = 'YES'
       AND ((client_site_aware = TRUE# AND
             ((user_site_key = site_key) OR -- interested in specific site
              (user_site_key IS NULL AND
               ((logs_shared = TRUE#) OR
                (this_site_key = nvl(site_key, this_site_key)))))) OR
            (client_site_aware = FALSE# AND
             (needstby is NULL OR
              nvl(is_standby, 'NO') = decode(needstby, TRUE#, 'YES', 'NO') OR
              (terminal = 'YES') OR
              (first_change# >= lbacked_al_next_scn AND
               first_change# <= standby_became_primary_scn))))
     -- latest incarnation last, as recovery needs them in that order.
ORDER BY resetlogs_change#, resetlogs_time, thread#, sequence#, terminal desc, is_recovery_dest_file desc, stamp desc; --------------------------------- -- Controlfilecopy Translation -- --------------------------------- getControlFileCopySingleRow boolean; getControlFileCopyCursor varchar2(30); --------------------- -- getDatafileCopy -- --------------------- getDatafileCopyCursor varchar2(30); getDatafileCopyNoRows noRows_t; getDatafileCopyDuplicates number; -- match file number getDatafileCopyLast rcvRec_t; getDatafileCopySingleRow boolean; getDatafileCopyLatestOnly boolean; ------------------ -- getProxyCopy -- ------------------ getProxyCopyCursor varchar2(30); getProxyCopyNoRows noRows_t; getProxyCopyByHandle boolean; -------------------- -- getBackupPiece -- -------------------- getBackupPieceCursor varchar2(30); getBackupPieceNoRows noRows_t; getBackupPieceDuplicates number; -- TRUE# -> duplicates OK -- FALSE# -> eliminate duplicates getBackupPieceLast bpRec_t; -- getBackupPieceDeviceType must not be null if getBackupPieceDuplicates -- is FALSE#. getBackupPieceDeviceType bp.device_type%TYPE; getBackupPieceExpectedPieces number; getBackupPiecePieceCount number; getBackupPieceByHandle boolean; getBackupPieceAvailableMask binary_integer; getBackupPieceSeekLast bpRec_t; getBackupPieceCopyNumber number; getBackupPieceBskey number; ----------------------- -- For compatibility -- ----------------------- findSpfileBackupCursor boolean; -- TRUE# -> cursor opened findControlfileBackupCursor boolean; -- TRUE# -> cursor opened ---------- -- List -- ---------- listGetBackupTag bp.tag%TYPE; listGetBackupAvailableMask binary_integer; listGetProxyDatafileCursor varchar2(30); -- This cursor is just a bad idea. It is here only for backward -- compatibility with 8.1. 
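-- Editorial note (illustrative sketch, not part of the original package):
-- lbal2 below computes elapseSecs_con as
--   abs((bs.completion_time - bs.start_time) * 86400)
-- Oracle DATE subtraction yields a difference in days, so multiplying by
-- 86400 (seconds per day) converts it to seconds. Hypothetical example: a
-- backup set spanning 0.0025 days reports 0.0025 * 86400 = 216 seconds.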
CURSOR lbal2(thread# number, lowseq number, highseq number, lowscn number, highscn number, from_time date , until_time date) RETURN rcvRec_t IS SELECT backupSet_con_t type_con, brl.brl_key key_con, brl.brl_recid recid_con, brl.brl_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, to_number(null) bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, brl.blocks blocks_con, brl.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, to_number(null) fromSCN_act, to_number(null) toSCN_act, to_date(null) toTime_act, to_number(null) rlgSCN_act, to_date(null) rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, to_number(null) dfNumber_obj, to_number(null) dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, brl.sequence# logSequence_obj, brl.thread# logThread_obj, dbinc.reset_scn logRlgSCN_obj, dbinc.reset_time logRlgTime_obj, brl.low_scn logLowSCN_obj, brl.low_time logLowTime_obj, brl.next_scn logNextSCN_obj, brl.next_time logNextTime_obj, brl.terminal logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj from brl, bs, dbinc where dbinc.db_key = this_db_key -- this database and dbinc.dbinc_key = brl.dbinc_key -- join dbinc, brl and bs.bs_key = brl.bs_key -- join bs, brl and (lbal2.thread# is null or brl.thread# = lbal2.thread#) and (brl.sequence# between nvl(lbal2.lowseq, 0) and nvl(lbal2.highseq, MAXSEQVAL)) and (lowscn is null or brl.low_scn >= lowscn) and (highscn is null or brl.next_scn< highscn) and (from_time is null or bs.completion_time >= from_time) and (until_time is null or bs.completion_time <= until_time) --and (pattern is null or brl.name like pattern) and bs.status != 'D' and bs.bck_type = 'L' -- only archivelog backups order by bs.bs_key, brl.thread#, brl.sequence#, brl.terminal desc; CURSOR ldbi( db_name varchar2, all_databases number) IS SELECT db_key, dbinc_key, name, dbid, current_incarnation, resetlogs_change#, resetlogs_time, status dbinc_status FROM rc_database_incarnation WHERE (all_databases = 1) OR -- user wants all database incarnations ((all_databases = 0) AND ((name = ldbi.db_name) OR -- user asked for specific database (db_name is NULL AND this_db_key=db_key))) -- user wants mounted db ORDER BY dbid, resetlogs_change#; CURSOR lnni( db_name varchar2, alldbs number) IS SELECT node.db_key, dbid, name, database_role, db_unique_name FROM rc_database, node WHERE rc_database.db_key = node.db_key AND ((alldbs = 1) OR (lnni.db_name IS NOT NULL AND upper(lnni.db_name) = name) OR (lnni.db_name IS NULL AND this_db_key = node.db_key)) ORDER BY dbid, database_role; -- get primary 
database site first for each database. CURSOR lrtbs IS SELECT DISTINCT ts.ts#, ts.ts_name FROM ts, tsatt, ckp ckp1, ckp ckp2 -- Join ts to tsatt by dbinc_key, ts#, creation_scn WHERE ts.dbinc_key = tsatt.dbinc_key AND ts.ts# = tsatt.ts# AND ts.create_scn = tsatt.create_scn -- Join tsatt to the ckp row containing the first SCN where we know that -- these tsatt values are valid. tsatt.start_ckp_key is never null. AND ckp1.ckp_key = tsatt.start_ckp_key -- Join tsatt to the ckp row containing an SCN beyond which we know that -- these tsatt values are not valid. Do an outer join because -- tsatt.end_ckp_key can be null. AND ckp2.ckp_key(+) = tsatt.end_ckp_key -- Select only rows belonging to the current incarnation. AND ts.dbinc_key = this_dbinc_key -- If there is an until SCN, select only tablespaces created earlier. -- If there is no until SCN, let the row pass (it will be filtered by -- the next condition if it has been dropped). create_scn is never null. AND (ts.create_scn < untilSCN or untilSCN is NULL) -- Select rows for tablespaces that have never been dropped, or which -- were dropped more recently than the until SCN. If there is a -- drop_scn and no until SCN (which means we don't want this row), -- then both conditions are false (NULL always compares false, and -- drop_scn is not null). AND (ts.drop_scn > untilSCN or ts.drop_scn is NULL) -- If there is an until SCN, select only those rows where we know that -- the tsatt values were valid at or before that SCN. ckp1.ckp_scn is -- never null. If there is no until SCN, let the row pass (it will be -- filtered by the next condition if the tsatt values are no longer -- valid). AND (ckp1.ckp_scn <= untilSCN or untilSCN is NULL) -- Select only those rows for which the tsatt values are still valid, or -- were known to be valid AFTER the until SCN. If there is no until SCN -- and the tsatt values are no longer valid (which means we don't want -- this row), then both conditions will be false. AND (ckp2.ckp_scn > untilSCN or ckp2.ckp_scn is NULL) -- All that was just to get us here. Select only tablespaces which -- contain one or more rollback segments. 
AND tsatt.rbs_count > 0 ORDER BY 1; ------------------------------------------------------- -- listTranslateProxyDFRecid and translateBackupFile -- ------------------------------------------------------- rcvRec_last rcvRec_t; -- last record returned from: -- getRecoveryAction -- findControlFileBackup ------------------------- -- getOfflineRangeCopy -- ------------------------- CURSOR getOfflineRangeCopy_c( offrRecid number ,offrCkpSCN number ,cfCreTime date ,dbincKey number) RETURN rcvRec_t IS SELECT imageCopy_con_t type_con, ccf_key key_con, ccf_recid recid_con, ccf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, fname fileName_con, tag tag_con, to_number(null) copyNumber_con, status status_con, to_number(null) blocks_con, -- ccf doesn't have blocks block_size blockSize_con, 'DISK' deviceType_con, completion_time compTime_con, create_time cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, ccf.ckp_scn toSCN_act, ccf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, ccf.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, ccf.keep_options keep_options, ccf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, to_number(null) newDfCreationSCN_obj, to_number(null) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM ccf, dbinc WHERE dbinc.dbinc_key = getOfflineRangeCopy_c.dbincKey AND dbinc.dbinc_key = ccf.dbinc_key AND getOfflineRangeCopy_c.cfCretime = create_time AND getOfflineRangeCopy_c.offrCkpSCN < ccf.ckp_scn AND getOfflineRangeCopy_c.offrRecid >= min_offr_recid AND status = 'A' AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))))) ORDER BY stamp_con desc; -- list all backups/copies/offline-ranges for all datafiles, -- for REPORT OBSOLETE cursor rddf is -- datafile copies select 2 preference, file#, COPY filetype, checkpoint_change#, checkpoint_time, resetlogs_change#, resetlogs_time, 0 incremental_change#, decode(decode(online_fuzzy,'NO',0,1)+decode(backup_fuzzy,'NO',0,1), 0,greatest(nvl(absolute_fuzzy_change#,0), nvl(recovery_fuzzy_change#,0)), maxscnval) fuzzy_change#, recid, stamp, name, 0 set_stamp, 0 set_count, cdf_key key, completion_time, 'DISK' device_type from rc_datafile_copy where db_key = this_db_key and status != 'D' and ((user_site_key = rc_datafile_copy.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(rc_datafile_copy.site_key, 
this_site_key))))) union all -- proxy datafile copies select 3, file#, PROXY, checkpoint_change#, checkpoint_time, resetlogs_change#, resetlogs_time, 0, decode(decode(online_fuzzy,'NO',0,1)+decode(backup_fuzzy,'NO',0,1), 0,greatest(nvl(absolute_fuzzy_change#,0), nvl(recovery_fuzzy_change#,0)), maxscnval), recid, stamp, handle, 0, 0, xdf_key, completion_time, device_type from rc_proxy_datafile where db_key = this_db_key and status != 'D' and ((user_site_key = rc_proxy_datafile.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(rc_proxy_datafile.site_key, this_site_key))))) union all -- datafiles in backup sets select decode(bs.bck_type, 'D', 4, 'I', 5), file#, decode(bs.bck_type, 'D', FULL_DF_BACKUP, 'I', INCREMENTAL_DF_BACKUP), bdf.ckp_scn, bdf.ckp_time, dbinc.reset_scn, dbinc.reset_time, bdf.incr_scn, nvl(bdf.abs_fuzzy_scn,0), bs.bs_recid, bs.bs_stamp, null, bs.set_stamp, bs.set_count, bs.bs_key, bs.completion_time, null from bdf, bs, dbinc where dbinc.db_key = this_db_key -- this database and dbinc.dbinc_key = bdf.dbinc_key -- join dbinc, bdf and bdf.bs_key = bs.bs_key -- join bdf, bs and bs.status != 'D' and bs.bck_type != 'L' -- only datafile backups and (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) union all -- controlfile copies select 2, 0, COPY, checkpoint_change#, checkpoint_time, resetlogs_change#, resetlogs_time, 0, 0, recid, stamp, name, 0, 0, ccf_key, completion_time, 'DISK' from rc_controlfile_copy where db_key = this_db_key and status != 'D' and ((user_site_key = rc_controlfile_copy.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(rc_controlfile_copy.site_key,this_site_key))))) union all -- proxy controlfile copies select 3, 0, PROXY, checkpoint_change#, checkpoint_time, resetlogs_change#, resetlogs_time, 0, 0, recid, stamp, handle, 0, 0, xcf_key, completion_time, device_type from rc_proxy_controlfile where db_key = this_db_key and status != 'D' and ((user_site_key = rc_proxy_controlfile.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key =nvl(rc_proxy_controlfile.site_key,this_site_key))))) union all -- controlfiles in backup sets select 4, 0, FULL_DF_BACKUP, bcf.ckp_scn, bcf.ckp_time, dbinc.reset_scn, dbinc.reset_time, 0, 0, bs.bs_recid, bs.bs_stamp, null, bs.set_stamp, bs.set_count, bs.bs_key, bs.completion_time, null from bcf, bs, dbinc where dbinc.db_key = this_db_key -- this database and dbinc.dbinc_key = bcf.dbinc_key -- join dbinc, bcf and bcf.bs_key = bs.bs_key -- join bcf, bs and bs.status != 'D' and bs.bck_type != 'L' -- ignore archivelog backups and (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) union all -- offline ranges select 1, file#, OFFLINE_RANGE, online_change#, online_time, resetlogs_change#, resetlogs_time, offline_change#, 0, recid, stamp, null, 0, 0, 0, online_time, null from rc_offline_range ofr, rc_database_incarnation di where ofr.db_key = this_db_key and di.db_key = this_db_key and ofr.dbinc_key = di.dbinc_key -- The algorithm in krmkcofl (Construct Obsolete File List) depends on the -- 
following ordering. order by 2 asc, -- file# 4 desc, -- checkpoint_change# 1 asc, -- preference 15 desc; -- completion_time, to break ties if all else is equal ---------------- -- BMR cursor -- ---------------- -- add rows from the corruption table to the BMR list CURSOR translateDatabaseCorruption_c(dfnumber IN number) IS SELECT file#, block#, blocks FROM rc_database_block_corruption bc WHERE bc.db_key = this_db_key AND -- belongs to this database bc.dbinc_key = this_dbinc_key AND bc.file# = nvl(translateDatabaseCorruption_c.dfnumber, bc.file#) AND bc.corruption_type != 'NOLOGGING' ORDER BY file#, block#; -- order same as in translateCorruptList_c. -- do not change this as krmkcortr is -- dependent on this -- database block corruption should be site aware; TODO ------------------------ -- RMAN configuration -- ------------------------ CURSOR cntConfig_c IS SELECT COUNT(*) FROM CONF WHERE db_key = this_db_key AND db_unique_name = nvl(user_db_unique_name, this_db_unique_name); CURSOR getPrimarySite_c IS SELECT db_unique_name FROM NODE WHERE db_key = this_db_key AND database_role = 'PRIMARY'; CURSOR findConfig_c( name varchar2, value varchar2, db_unique_name varchar2) IS SELECT conf#, name, value FROM rc_rman_configuration rm WHERE db_key = this_db_key -- part of this database AND (findConfig_c.name is null OR UPPER(findConfig_c.name) = UPPER(rm.name)) AND (findConfig_c.value is null OR UPPER(findConfig_c.value) = UPPER(rm.value)) AND -- site specific rows ((nvl(findConfig_c.db_unique_name, rm.db_unique_name) = rm.db_unique_name) OR rm.db_unique_name IS NULL); -- generic conf rows -- findConfig_c.db_unique_name will be NULL for 9i RMAN and hence we will -- return all configurations. -------------------- -- Backup History -- -------------------- getLastBackupHistory bhistoryRec_t; -- Obsolete from version 9.2.0.1 onwards CURSOR dfBackupHistory_c1( file# IN number ,crescn IN number ,device_type IN varchar2) RETURN bhistoryRec_t IS SELECT bdf.file# dfNumber, bdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, bdf.ckp_scn ckp_scn, bdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM bdf, dbinc, (SELECT /*+no_merge*/ file#, create_scn, stop_scn FROM df WHERE create_scn = dfBackupHistory_c1.crescn AND file# = dfBackupHistory_c1.file# AND dbinc_key = this_dbinc_key) df, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type != 'L' -- ignore al backups AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND (dfBackupHistory_c1.device_type IS NULL OR dfBackupHistory_c1.device_type = bp.device_type) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE bdf.dbinc_key = dbinc.dbinc_key -- join dbinc, bdf AND dbinc.db_key = this_db_key -- this database AND bdf.create_scn
= df.create_scn -- create scn match AND bdf.file# = df.file# -- join bdf, df AND bdf.bs_key = bs.bs_key -- join bdf, bs UNION ALL SELECT cdf.file# dfNumber, cdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, cdf.ckp_scn ckp_scn, cdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, cdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM cdf, dbinc, (SELECT /*+no_merge */ file#, create_scn, stop_scn FROM df WHERE create_scn = dfBackupHistory_c1.crescn AND file# = dfBackupHistory_c1.file# AND dbinc_key = this_dbinc_key) df WHERE cdf.dbinc_key = dbinc.dbinc_key -- join dbinc, cdf AND dbinc.db_key = this_db_key -- this database AND cdf.create_scn = df.create_scn -- create scn match AND cdf.file# = df.file# -- join cdf, df AND cdf.status = 'A' -- available copy AND (dfBackupHistory_c1.device_type IS NULL OR dfBackupHistory_c1.device_type = 'DISK') AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL SELECT xdf.file# dfNumber, xdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, xdf.ckp_scn ckp_scn, xdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, xdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM xdf, dbinc, (SELECT /*+no_merge*/ file#, create_scn, stop_scn FROM df WHERE create_scn = dfBackupHistory_c1.crescn AND file# = dfBackupHistory_c1.file# AND dbinc_key = this_dbinc_key) df WHERE xdf.dbinc_key = dbinc.dbinc_key -- join xdf, dbinc AND dbinc.db_key = this_db_key -- this database AND xdf.create_scn = df.create_scn -- create scn match AND xdf.file# = df.file# -- join xdf, df AND xdf.status = 'A' -- available proxy df AND (dfBackupHistory_c1.device_type IS NULL OR dfBackupHistory_c1.device_type = xdf.device_type) AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) -- order is important to find number of backups and max completion time ORDER BY dfNumber, create_scn, reset_scn, reset_time, ckp_scn desc, stop_scn desc, compTime; CURSOR dfBackupHistory_c2( device_type IN varchar2 ,cmd IN varchar2 ,ktag IN varchar2 ,pattern1 IN varchar2 ,pattern2 IN varchar2 ,pattern3 IN varchar2 ,pattern4 IN varchar2) RETURN bhistoryRec_t IS SELECT bdf.file# dfNumber, bdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, bdf.ckp_scn ckp_scn, bdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, bdf.plugin_scn pluginSCN, bdf.plugin_reset_scn pluginRlgSCN, bdf.plugin_reset_time pluginRlgTime, decode(bdf.plugin_scn, 0, bdf.create_scn, 
bdf.plugin_scn) newcreate_scn, decode(bdf.plugin_reset_scn, 0, dbinc.reset_scn, bdf.plugin_reset_scn) newreset_scn, nvl(bdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM bdf, dbinc, df, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type != 'L' -- ignore al backups AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) AND (dfBackupHistory_c2.cmd is null OR -- bug 6658764 dfBackupHistory_c2.cmd != 'B' OR (dfBackupHistory_c2.cmd = 'B' AND -- Backup command and (dfBackupHistory_c2.ktag is null AND -- nokeep cmd matches bs.keep_options = 0) OR -- nokeep backup or (dfBackupHistory_c2.ktag = bp.tag and -- keep backup cmd tag bs.keep_options != 0))) -- matches keep backup AND (dfBackupHistory_c2.device_type IS NULL OR dfBackupHistory_c2.device_type = bp.device_type) AND ((dfBackupHistory_c2.pattern1 IS NULL AND dfBackupHistory_c2.pattern2 IS NULL AND dfBackupHistory_c2.pattern3 IS NULL AND dfBackupHistory_c2.pattern4 IS NULL) OR (bp.handle LIKE dfBackupHistory_c2.pattern1 OR bp.handle LIKE dfBackupHistory_c2.pattern2 OR bp.handle LIKE dfBackupHistory_c2.pattern3 OR bp.handle LIKE dfBackupHistory_c2.pattern4)) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE bdf.dbinc_key = dbinc.dbinc_key -- join dbinc, bdf AND dbinc.db_key = this_db_key -- this database AND df.dbinc_key = this_dbinc_key -- this incarnation AND ((df.plugin_scn = 0 AND -- create/plugin scn match bdf.plugin_scn = 0 AND bdf.create_scn = df.create_scn) OR (df.plugin_scn != 0 AND bdf.plugin_scn = df.plugin_scn)) AND bdf.file# = df.file# -- join bdf, df AND bdf.bs_key = bs.bs_key -- join bdf, bs AND (tc_database = TRUE# OR isTranslatedFno(df.file#) = TRUE#) UNION ALL SELECT cdf.file# dfNumber, cdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, cdf.ckp_scn ckp_scn, cdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, cdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(cdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, cdf.plugin_scn pluginSCN, cdf.plugin_reset_scn pluginRlgSCN, cdf.plugin_reset_time pluginRlgTime, decode(cdf.plugin_scn, 0, cdf.create_scn, cdf.plugin_scn) newcreate_scn, decode(cdf.plugin_reset_scn, 0, dbinc.reset_scn, cdf.plugin_reset_scn) newreset_scn, nvl(cdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM cdf, dbinc, df WHERE cdf.dbinc_key = dbinc.dbinc_key -- join dbinc, cdf AND dbinc.db_key = this_db_key -- this database AND df.dbinc_key = this_dbinc_key -- this incarnation AND ((df.plugin_scn = 0 AND -- create/plugin scn match cdf.plugin_scn = 0 AND cdf.create_scn = df.create_scn) OR (df.plugin_scn != 0 AND cdf.plugin_scn = df.plugin_scn)) AND cdf.file# = df.file# -- join cdf, df AND cdf.status = 'A' -- available copy AND (tc_database = TRUE# OR isTranslatedFno(df.file#) = TRUE#) AND (dfBackupHistory_c2.cmd is null OR -- bug 6658764 dfBackupHistory_c2.cmd != 'B' OR (dfBackupHistory_c2.cmd = 'B' AND -- Backup command and (dfBackupHistory_c2.ktag is null AND -- 
nokeep cmd matches cdf.keep_options = 0) OR -- nokeep backup or (dfBackupHistory_c2.ktag = cdf.tag and -- keep backup cmd tag cdf.keep_options != 0))) -- matches keep backup AND (dfBackupHistory_c2.device_type IS NULL OR dfBackupHistory_c2.device_type = 'DISK') AND ((dfBackupHistory_c2.pattern1 IS NULL AND dfBackupHistory_c2.pattern2 IS NULL AND dfBackupHistory_c2.pattern3 IS NULL AND dfBackupHistory_c2.pattern4 IS NULL) OR (cdf.fname LIKE dfBackupHistory_c2.pattern1 OR cdf.fname LIKE dfBackupHistory_c2.pattern2 OR cdf.fname LIKE dfBackupHistory_c2.pattern3 OR cdf.fname LIKE dfBackupHistory_c2.pattern4)) AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL SELECT xdf.file# dfNumber, xdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, xdf.ckp_scn ckp_scn, xdf.ckp_time ckp_time, nvl(df.stop_scn, 0) stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, xdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, xdf.plugin_scn pluginSCN, xdf.plugin_reset_scn pluginRlgSCN, xdf.plugin_reset_time pluginRlgTime, decode(xdf.plugin_scn, 0, xdf.create_scn, xdf.plugin_scn) newcreate_scn, decode(xdf.plugin_reset_scn, 0, dbinc.reset_scn, xdf.plugin_reset_scn) newreset_scn, nvl(xdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM xdf, dbinc, df WHERE xdf.dbinc_key = dbinc.dbinc_key -- join xdf, dbinc AND dbinc.db_key = this_db_key -- this database AND df.dbinc_key = this_dbinc_key -- this incarnation AND ((df.plugin_scn = 0 AND -- create/plugin scn match xdf.plugin_scn = 0 AND xdf.create_scn = df.create_scn) OR (df.plugin_scn != 0 AND xdf.plugin_scn = df.plugin_scn)) AND xdf.file# = df.file# -- join xdf, df AND xdf.status = 'A' -- available proxy df AND (tc_database = TRUE# OR isTranslatedFno(df.file#) = TRUE#) AND (dfBackupHistory_c2.cmd is null OR -- bug 6658764 dfBackupHistory_c2.cmd != 'B' OR (dfBackupHistory_c2.cmd = 'B' AND -- Backup command and (dfBackupHistory_c2.ktag is null AND -- nokeep cmd matches xdf.keep_options = 0) OR -- nokeep backup or (dfBackupHistory_c2.ktag = xdf.tag and -- keep backup cmd tag xdf.keep_options != 0))) -- matches keep backup AND (dfBackupHistory_c2.device_type IS NULL OR dfBackupHistory_c2.device_type = xdf.device_type) AND ((dfBackupHistory_c2.pattern1 IS NULL AND dfBackupHistory_c2.pattern2 IS NULL AND dfBackupHistory_c2.pattern3 IS NULL AND dfBackupHistory_c2.pattern4 IS NULL) OR (xdf.handle LIKE dfBackupHistory_c2.pattern1 OR xdf.handle LIKE dfBackupHistory_c2.pattern2 OR xdf.handle LIKE dfBackupHistory_c2.pattern3 OR xdf.handle LIKE dfBackupHistory_c2.pattern4)) AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) -- order is important to find number of backups and max completion time ORDER BY dfNumber, newcreate_scn, newreset_scn, newreset_time, ckp_scn desc, stop_scn desc, compTime desc; CURSOR dcBackupHistory_c( device_type IN varchar2 ,cmd IN varchar2 ,ktag IN varchar2 ,pattern1 IN varchar2 ,pattern2 IN varchar2 ,pattern3 IN varchar2 ,pattern4 IN varchar2) RETURN bhistoryRec_t IS SELECT bdf.file# dfNumber, bdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, bdf.ckp_scn ckp_scn, bdf.ckp_time ckp_time, cdf.ckp_scn stop_scn, 
to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, bdf.plugin_scn pluginSCN, bdf.plugin_reset_scn pluginRlgSCN, bdf.plugin_reset_time pluginRlgTime, decode(bdf.plugin_scn, 0, bdf.create_scn, bdf.plugin_scn) newcreate_scn, decode(bdf.plugin_reset_scn, 0, dbinc.reset_scn, bdf.plugin_reset_scn) newreset_scn, nvl(bdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM bdf, dbinc, (SELECT DISTINCT cdf.file#, cdf.create_scn, cdf.plugin_scn, cdf.plugged_readonly, cdf.ckp_scn, cdf.ckp_time, cdf.dbinc_key FROM cdf, dbinc WHERE cdf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) AND cdf.status = 'A') cdf, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type != 'L' -- ignore al backups AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND (dcBackupHistory_c.cmd is null OR -- bug 6658764 dcBackupHistory_c.cmd != 'B' OR (dcBackupHistory_c.cmd = 'B' AND -- Backup command and (dcBackupHistory_c.ktag is null AND -- nokeep cmd matches bs.keep_options = 0) OR -- nokeep backup or (dcBackupHistory_c.ktag = bp.tag and -- keep backup cmd tag bs.keep_options != 0))) -- matches keep backup AND (dcBackupHistory_c.device_type IS NULL OR dcBackupHistory_c.device_type = bp.device_type) AND ((dcBackupHistory_c.pattern1 IS NULL AND dcBackupHistory_c.pattern2 IS NULL AND dcBackupHistory_c.pattern3 IS NULL AND dcBackupHistory_c.pattern4 IS NULL) OR (bp.handle LIKE dcBackupHistory_c.pattern1 OR bp.handle LIKE dcBackupHistory_c.pattern2 OR bp.handle LIKE dcBackupHistory_c.pattern3 OR bp.handle LIKE dcBackupHistory_c.pattern4)) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE bdf.dbinc_key = dbinc.dbinc_key -- join dbinc, bdf AND dbinc.db_key = this_db_key -- this database (all inc) AND cdf.plugin_scn = bdf.plugin_scn AND cdf.plugged_readonly = bdf.plugged_readonly AND bdf.create_scn = cdf.create_scn -- create scn match AND bdf.file# = cdf.file# -- join bdf, cdf AND bdf.ckp_scn = cdf.ckp_scn AND bdf.ckp_time = cdf.ckp_time AND bdf.dbinc_key = cdf.dbinc_key AND (bdf.incr_scn = 0 OR bdf.incr_scn = cdf.create_scn) -- full backup AND bdf.bs_key = bs.bs_key -- join bdf, bs UNION ALL SELECT 0 dfNumber, 0 create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, bcf.ckp_scn ckp_scn, bcf.ckp_time ckp_time, ccf.ckp_scn stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, 0 newcreate_scn, dbinc.reset_scn newreset_scn, dbinc.reset_time newreset_time FROM bcf, dbinc, (SELECT DISTINCT ccf.ckp_scn, ccf.ckp_time, ccf.dbinc_key FROM ccf, dbinc WHERE ccf.dbinc_key = 
dbinc.dbinc_key AND dbinc.db_key = this_db_key AND ccf.status = 'A') ccf, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type != 'L' -- ignore al backups AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND (dcBackupHistory_c.cmd is null OR -- bug 6658764 dcBackupHistory_c.cmd != 'B' OR (dcBackupHistory_c.cmd = 'B' AND -- Backup command and (dcBackupHistory_c.ktag is null AND -- nokeep cmd matches bs.keep_options = 0) OR -- nokeep backup or (dcBackupHistory_c.ktag = bp.tag and -- keep backup cmd tag bs.keep_options != 0))) -- matches keep backup AND (dcBackupHistory_c.device_type IS NULL OR dcBackupHistory_c.device_type = bp.device_type) AND ((dcBackupHistory_c.pattern1 IS NULL AND dcBackupHistory_c.pattern2 IS NULL AND dcBackupHistory_c.pattern3 IS NULL AND dcBackupHistory_c.pattern4 IS NULL) OR (bp.handle LIKE dcBackupHistory_c.pattern1 OR bp.handle LIKE dcBackupHistory_c.pattern2 OR bp.handle LIKE dcBackupHistory_c.pattern3 OR bp.handle LIKE dcBackupHistory_c.pattern4)) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE bcf.dbinc_key = dbinc.dbinc_key -- join dbinc, bcf AND dbinc.db_key = this_db_key -- this database (all inc) AND bcf.ckp_scn = ccf.ckp_scn AND bcf.ckp_time = ccf.ckp_time AND bcf.dbinc_key = ccf.dbinc_key AND bcf.bs_key = bs.bs_key -- join bcf, bs UNION ALL SELECT cdf.file# dfNumber, cdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, cdf.ckp_scn ckp_scn, cdf.ckp_time ckp_time, cdf.ckp_scn stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, cdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(cdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, cdf.plugin_scn pluginSCN, cdf.plugin_reset_scn pluginRlgSCN, cdf.plugin_reset_time pluginRlgTime, decode(cdf.plugin_scn, 0, cdf.create_scn, cdf.plugin_scn) newcreate_scn, decode(cdf.plugin_reset_scn, 0, dbinc.reset_scn, cdf.plugin_reset_scn) newreset_scn, nvl(cdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM cdf, dbinc WHERE cdf.dbinc_key = dbinc.dbinc_key -- join cdf, dbinc AND dbinc.db_key = this_db_key -- this database (all inc) AND cdf.status = 'A' -- available backup AND (dcBackupHistory_c.cmd is null OR -- bug 6658764 dcBackupHistory_c.cmd != 'B' OR (dcBackupHistory_c.cmd = 'B' AND -- Backup command and (dcBackupHistory_c.ktag is null AND -- nokeep cmd matches cdf.keep_options = 0) OR -- nokeep backup or (dcBackupHistory_c.ktag = cdf.tag and -- keep backup cmd tag cdf.keep_options != 0))) -- matches keep backup AND (dcBackupHistory_c.device_type IS NULL OR dcBackupHistory_c.device_type = 'DISK') AND (dcBackupHistory_c.pattern1 IS NOT NULL OR dcBackupHistory_c.pattern2 IS NOT NULL OR dcBackupHistory_c.pattern3 IS NOT NULL OR dcBackupHistory_c.pattern4 IS NOT NULL) AND (cdf.fname LIKE dcBackupHistory_c.pattern1 OR cdf.fname LIKE dcBackupHistory_c.pattern2 OR cdf.fname LIKE dcBackupHistory_c.pattern3 OR cdf.fname LIKE dcBackupHistory_c.pattern4) AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND
((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL SELECT xdf.file# dfNumber, xdf.create_scn create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, xdf.ckp_scn ckp_scn, xdf.ckp_time ckp_time, cdf.ckp_scn stop_scn, to_number(null) logThread, to_number(null) logSequence, to_number(null) setStamp, to_number(null) setCount, xdf.completion_time compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly, xdf.plugin_scn pluginSCN, xdf.plugin_reset_scn pluginRlgSCN, xdf.plugin_reset_time pluginRlgTime, decode(xdf.plugin_scn, 0, xdf.create_scn, xdf.plugin_scn) newcreate_scn, decode(xdf.plugin_reset_scn, 0, dbinc.reset_scn, xdf.plugin_reset_scn) newreset_scn, nvl(xdf.plugin_reset_time, dbinc.reset_time) newreset_time FROM xdf, dbinc, (SELECT DISTINCT cdf.file#, cdf.create_scn, cdf.plugin_scn, cdf.plugged_readonly, cdf.ckp_scn, cdf.ckp_time, cdf.dbinc_key FROM cdf, dbinc WHERE cdf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key AND cdf.status = 'A') cdf WHERE xdf.dbinc_key = dbinc.dbinc_key -- join xdf, dbinc AND dbinc.db_key = this_db_key -- this database (all inc) AND xdf.file# = cdf.file# -- join xdf, cdf AND xdf.plugged_readonly = cdf.plugged_readonly AND xdf.plugin_scn = cdf.plugin_scn AND xdf.create_scn = cdf.create_scn -- create scn match AND xdf.dbinc_key = cdf.dbinc_key AND xdf.ckp_scn = cdf.ckp_scn AND xdf.ckp_time = cdf.ckp_time AND xdf.status = 'A' -- available proxy df AND (dcBackupHistory_c.cmd is null OR -- bug 6658764 dcBackupHistory_c.cmd != 'B' OR (dcBackupHistory_c.cmd = 'B' AND -- Backup command and (dcBackupHistory_c.ktag is null AND -- nokeep cmd matches xdf.keep_options = 0) OR -- nokeep backup or (dcBackupHistory_c.ktag = xdf.tag and -- keep backup cmd tag xdf.keep_options != 0))) -- matches keep backup AND (dcBackupHistory_c.device_type IS NULL OR dcBackupHistory_c.device_type = xdf.device_type) AND ((dcBackupHistory_c.pattern1 IS NULL AND dcBackupHistory_c.pattern2 IS NULL AND dcBackupHistory_c.pattern3 IS NULL AND dcBackupHistory_c.pattern4 IS NULL) OR (xdf.handle LIKE dcBackupHistory_c.pattern1 OR xdf.handle LIKE dcBackupHistory_c.pattern2 OR xdf.handle LIKE dcBackupHistory_c.pattern3 OR xdf.handle LIKE dcBackupHistory_c.pattern4)) AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) -- order is important to find number of backups and max completion time ORDER BY dfNumber, newcreate_scn, newreset_scn, newreset_time, ckp_scn desc, stop_scn desc, compTime; -- Obsolete from version 9.2.0.1 onwards CURSOR alBackupHistory_c1( thread# IN number, sequence# IN number, device_type IN varchar2) RETURN bhistoryRec_t IS SELECT to_number(null) dfNumber, to_number(null) create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, brl.thread# logThread, brl.sequence# logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, brl.terminal logTerminal, brl.next_scn next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM brl, dbinc, (SELECT /*+no_merge*/ DISTINCT thread#, sequence# FROM al WHERE al.thread# = alBackupHistory_c1.thread# AND al.sequence# = alBackupHistory_c1.sequence# AND al.dbinc_key =
this_dbinc_key AND al.status = 'A' AND al.archived = 'Y') al, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type = 'L' -- only al backups AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND (alBackupHistory_c1.device_type IS NULL OR alBackupHistory_c1.device_type = bp.device_type) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE dbinc.dbinc_key = this_dbinc_key -- this incarnation AND brl.dbinc_key = dbinc.dbinc_key -- join brl, dbinc AND dbinc.db_key = this_db_key AND brl.thread# = al.thread# -- join brl and al AND brl.sequence# = al.sequence# AND brl.dbinc_key = this_dbinc_key AND brl.bs_key = bs.bs_key -- join brl,bs UNION ALL SELECT to_number(null) dfNumber, to_number(null) crescn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, xal.thread# logThread, xal.sequence# logSequence, to_number(null) setStamp, to_number(null) setCount, xal.completion_time compTime, 0 nbackups, xal.terminal logTerminal, xal.next_scn next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM xal, dbinc, (SELECT /*+no_merge*/ DISTINCT thread#, sequence# FROM al WHERE al.thread# = alBackupHistory_c1.thread# AND al.sequence# = alBackupHistory_c1.sequence# AND al.dbinc_key = this_dbinc_key AND al.status = 'A' AND al.archived = 'Y') al WHERE xal.dbinc_key = dbinc.dbinc_key -- join xal, dbinc AND dbinc.db_key = this_db_key -- this database AND xal.thread# = al.thread# AND xal.sequence# = al.sequence# AND xal.dbinc_key = this_dbinc_key AND xal.status = 'A' AND (alBackupHistory_c1.device_type IS NULL OR alBackupHistory_c1.device_type = xal.device_type) AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key))))) -- order is important to find number of backups and max completion time -- Note that we don't use terminal eor attribute in above join condition, -- as it is not required and only an optimization to select required brls ORDER BY reset_scn, reset_time, logThread, logSequence, logTerminal desc, compTime; CURSOR alBackupHistory_c2( device_type IN varchar2 ,cmd IN varchar2 ,ktag IN varchar2 ,pattern1 IN varchar2 ,pattern2 IN varchar2 ,pattern3 IN varchar2 ,pattern4 IN varchar2) RETURN bhistoryRec_t IS SELECT to_number(null) dfNumber, to_number(null) create_scn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, brl.thread# logThread, brl.sequence# logSequence, to_number(null) setStamp, to_number(null) setCount, bs.completion_time compTime, 0 nbackups, brl.terminal logTerminal, brl.next_scn next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM brl, dbinc, (SELECT DISTINCT al.thread#, al.sequence#, al.dbinc_key FROM al, dbinc WHERE dbinc.dbinc_key = al.dbinc_key AND 
dbinc.db_key = this_db_key AND al.status = 'A' AND al.archived = 'Y' AND (tc_thread IS NULL OR al.thread# = tc_thread) AND (tc_fromSeq IS NULL OR al.sequence# >= tc_fromSeq) AND (tc_toSeq IS NULL OR al.sequence# <= tc_toSeq) AND (tc_fromSCN IS NULL OR al.next_scn > tc_fromSCN) AND (tc_toSCN IS NULL OR al.low_scn < tc_toSCN) AND (tc_pattern IS NULL OR al.fname like tc_pattern) AND (tc_fromTime IS NULL OR al.next_time > tc_fromTime) AND (tc_toTime IS NULL OR al.low_time <= tc_toTime)) al, (SELECT bs.bs_key, bs.completion_time FROM bs, bp WHERE bp.status = 'A' -- only available pieces AND bs.bck_type = 'L' -- only al backups AND bs.status = 'A' -- available backupset AND bs.bs_key = bp.bs_key -- join bs, bp AND bs.db_key = this_db_key -- this database AND bp.db_key = this_db_key -- this database AND (alBackupHistory_c2.cmd is null OR -- bug 6658764 (alBackupHistory_c2.cmd != 'B' AND alBackupHistory_c2.cmd != 'D') OR ((alBackupHistory_c2.cmd = 'B' OR -- Backup command or alBackupHistory_c2.cmd = 'D') AND -- Delete command and (alBackupHistory_c2.ktag is null AND -- nokeep cmd matches bs.keep_options = 0) OR -- nokeep backup or (alBackupHistory_c2.ktag = bp.tag and -- keep backup cmd tag bs.keep_options != 0))) -- matches keep backup AND (alBackupHistory_c2.device_type IS NULL OR alBackupHistory_c2.device_type = bp.device_type) AND ((alBackupHistory_c2.pattern1 IS NULL AND alBackupHistory_c2.pattern2 IS NULL AND alBackupHistory_c2.pattern3 IS NULL AND alBackupHistory_c2.pattern4 IS NULL) OR (bp.handle LIKE alBackupHistory_c2.pattern1 OR bp.handle LIKE alBackupHistory_c2.pattern2 OR bp.handle LIKE alBackupHistory_c2.pattern3 OR bp.handle LIKE alBackupHistory_c2.pattern4)) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.bs_key, bp.device_type, bp.copy#, bs.pieces, bs.completion_time HAVING count(distinct bp.piece#) = bs.pieces) bs WHERE (canApplyAnyRedo = TRUE# OR dbinc.dbinc_key = this_dbinc_key) AND brl.dbinc_key = dbinc.dbinc_key -- join brl, dbinc AND dbinc.db_key = this_db_key AND brl.thread# = al.thread# -- join brl and al AND brl.sequence# = al.sequence# AND brl.dbinc_key = al.dbinc_key AND brl.bs_key = bs.bs_key -- join brl,bs UNION ALL SELECT to_number(null) dfNumber, to_number(null) crescn, dbinc.reset_scn reset_scn, dbinc.reset_time reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, xal.thread# logThread, xal.sequence# logSequence, to_number(null) setStamp, to_number(null) setCount, xal.completion_time compTime, 0 nbackups, xal.terminal logTerminal, xal.next_scn next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM xal, dbinc, (SELECT DISTINCT al.thread#, al.sequence#, al.dbinc_key FROM al, dbinc WHERE dbinc.dbinc_key = al.dbinc_key AND dbinc.db_key = this_db_key AND al.status = 'A' AND al.archived = 'Y' AND (tc_thread IS NULL OR al.thread# = tc_thread) AND (tc_fromSeq IS NULL OR al.sequence# >= tc_fromSeq) AND (tc_toSeq IS NULL OR al.sequence# <= tc_toSeq) AND (tc_fromSCN IS NULL OR al.next_scn > tc_fromSCN) AND (tc_toSCN IS NULL OR al.low_scn < tc_toSCN) AND (tc_pattern IS NULL OR al.fname like tc_pattern) AND (tc_fromTime IS NULL OR al.next_time > tc_fromTime) AND (tc_toTime IS NULL OR al.low_time <= tc_toTime)) al WHERE 
(canApplyAnyRedo = TRUE# OR dbinc.dbinc_key = this_dbinc_key) AND xal.dbinc_key = dbinc.dbinc_key -- join xal, dbinc AND dbinc.db_key = this_db_key -- this database AND xal.thread# = al.thread# AND xal.sequence# = al.sequence# AND xal.dbinc_key = al.dbinc_key AND xal.status = 'A' AND (alBackupHistory_c2.cmd is null OR -- bug 6658764 (alBackupHistory_c2.cmd != 'B' AND alBackupHistory_c2.cmd != 'D') OR ((alBackupHistory_c2.cmd = 'B' OR -- Backup command or alBackupHistory_c2.cmd = 'D') AND -- Delete command and (alBackupHistory_c2.ktag is null AND -- nokeep cmd matches xal.keep_options = 0) OR -- nokeep backup or (alBackupHistory_c2.ktag = xal.tag and -- keep backup cmd tag xal.keep_options != 0))) -- matches keep backup AND (alBackupHistory_c2.device_type IS NULL OR alBackupHistory_c2.device_type = xal.device_type) AND ((alBackupHistory_c2.pattern1 IS NULL AND alBackupHistory_c2.pattern2 IS NULL AND alBackupHistory_c2.pattern3 IS NULL AND alBackupHistory_c2.pattern4 IS NULL) OR (xal.handle LIKE alBackupHistory_c2.pattern1 OR xal.handle LIKE alBackupHistory_c2.pattern2 OR xal.handle LIKE alBackupHistory_c2.pattern3 OR xal.handle LIKE alBackupHistory_c2.pattern4)) AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key))))) -- order is important to find number of backups and max completion time -- Note that we don't use terminal eor attribute in above join condition, -- as it is not required and only an optimization to select required brls ORDER BY reset_scn, reset_time, logThread, logSequence, logTerminal desc, compTime; CURSOR bsBackupHistory_c1( set_stamp IN number ,set_count IN number ,device_type IN varchar2 ,pattern1 IN varchar2 ,pattern2 IN varchar2 ,pattern3 IN varchar2 ,pattern4 IN varchar2 ) RETURN bhistoryRec_t IS SELECT to_number(null) dfNumber, to_number(null) create_scn, to_number(null) reset_scn, to_date(null) reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, to_number(null) logThread, to_number(null) logSequence, bs.set_stamp setStamp, bs.set_count setCount, max(bp.completion_time) compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM bs, bp WHERE bs.db_key = this_db_key -- this database AND bp.bs_key = bs.bs_key -- join bs, bp AND bs.set_stamp = bsBackupHistory_c1.set_stamp AND bs.set_count = bsBackupHistory_c1.set_count AND bs.status = 'A' AND (bsBackupHistory_c1.device_type IS NULL OR bsBackupHistory_c1.device_type = bp.device_type) AND ((bsBackupHistory_c1.pattern1 IS NULL AND bsBackupHistory_c1.pattern2 IS NULL AND bsBackupHistory_c1.pattern3 IS NULL AND bsBackupHistory_c1.pattern4 IS NULL) OR (bp.handle LIKE bsBackupHistory_c1.pattern1 OR bp.handle LIKE bsBackupHistory_c1.pattern2 OR bp.handle LIKE bsBackupHistory_c1.pattern3 OR bp.handle LIKE bsBackupHistory_c1.pattern4)) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.set_stamp, bs.set_count, bs.pieces, bp.copy#, bp.device_type HAVING count(distinct bp.piece#) = bs.pieces -- order is important to find number of backups and max completion time ORDER BY setStamp, setCount, compTime; CURSOR 
bsBackupHistory_c2( device_type IN varchar2 ,cmd IN varchar2 ,ktag IN varchar2 ,pattern1 IN varchar2 ,pattern2 IN varchar2 ,pattern3 IN varchar2 ,pattern4 IN varchar2) RETURN bhistoryRec_t IS SELECT to_number(null) dfNumber, to_number(null) create_scn, to_number(null) reset_scn, to_date(null) reset_time, to_number(null) ckp_scn, to_date(null) ckp_time, to_number(null) stop_scn, to_number(null) logThread, to_number(null) logSequence, bs.set_stamp setStamp, bs.set_count setCount, max(bp.completion_time) compTime, 0 nbackups, to_char(null) logTerminal, to_number(null) next_scn, 0 pluggedRonly, 0 pluginSCN, 0 pluginRlgSCN, to_date(null) pluginRlgTime, to_number(null) newcreate_scn, to_number(null) newreset_scn, to_date(null) newreset_time FROM bs, bp WHERE bs.db_key = this_db_key -- this database AND bp.bs_key = bs.bs_key -- join bs, bp AND bs.status = 'A' AND (bsBackupHistory_c2.cmd is null OR -- bug 6658764 bsBackupHistory_c2.cmd != 'B' OR (bsBackupHistory_c2.cmd = 'B' AND -- Backup command and (bsBackupHistory_c2.ktag is null AND -- nokeep cmd matches bs.keep_options = 0) OR -- nokeep backup or (bsBackupHistory_c2.ktag = bp.tag and -- keep backup cmd tag bs.keep_options != 0))) -- matches keep backup AND (bsBackupHistory_c2.device_type IS NULL OR bsBackupHistory_c2.device_type = bp.device_type) AND ((bsBackupHistory_c2.pattern1 IS NULL AND bsBackupHistory_c2.pattern2 IS NULL AND bsBackupHistory_c2.pattern3 IS NULL AND bsBackupHistory_c2.pattern4 IS NULL) OR (bp.handle LIKE bsBackupHistory_c2.pattern1 OR bp.handle LIKE bsBackupHistory_c2.pattern2 OR bp.handle LIKE bsBackupHistory_c2.pattern3 OR bp.handle LIKE bsBackupHistory_c2.pattern4)) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs.set_stamp, bs.set_count, bs.pieces, bp.copy#, bp.device_type HAVING count(distinct bp.piece#) = bs.pieces -- order is important to find number of backups and max completion time ORDER BY setStamp, setCount, compTime; -------- Copy of Datafile -------- -- Get Copy of Datafile Cursor -- --------------------------------- CURSOR getCopyofDatafile_c2( itag varchar2 ) IS SELECT /*+ first_rows */ file#, creation_change#, resetlogs_change#, resetlogs_time, recid, stamp, name, tag, status, blocks, block_size, completion_time, checkpoint_change#, checkpoint_time, decode(plugged_readonly, 'YES', 1, 0) pluggedRonly, plugin_change#, plugin_resetlogs_change#, plugin_resetlogs_time FROM rc_datafile_copy WHERE status = 'A' AND (itag is NULL or tag = itag) AND (tc_database = TRUE# OR isTranslatedFno(file#) = TRUE#) -- only translated files AND ((user_site_key = rc_datafile_copy.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(rc_datafile_copy.site_key, this_site_key))))) ORDER BY file#, decode(plugged_readonly, 'YES', plugin_change#, checkpoint_change#) desc, recid desc; -------- Copy of Datafile -------- -- Get Copy of Datafile Cursor -- -- Obsoleted in 11.2.0.3 -- --------------------------------- CURSOR getCopyofDatafile_c( dfnumber number ,itag varchar2 ,crescn number ,rlgscn number ,rlgtime date ,pluginSCN number ) IS SELECT /*+ first_rows */ recid, stamp, name, tag, status, blocks, block_size, completion_time, checkpoint_change#, checkpoint_time, creation_change#, resetlogs_change#, resetlogs_time, decode(plugged_readonly, 'YES', 1, 0) pluggedRonly FROM rc_datafile_copy WHERE
status = 'A' AND (itag is NULL or tag = itag) AND file# = dfnumber AND ((pluginSCN = 0 AND creation_change# = crescn) OR (pluginSCN != 0 AND plugin_change# = pluginSCN)) AND ((plugged_readonly = 'NO' AND resetlogs_change# = rlgscn AND resetlogs_time = rlgtime) OR (plugged_readonly = 'YES' AND plugin_resetlogs_change# = rlgscn AND plugin_resetlogs_time = rlgtime)) AND ((user_site_key = rc_datafile_copy.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(rc_datafile_copy.site_key, this_site_key))))) ORDER BY decode(plugged_readonly, 'YES', plugin_change#, checkpoint_change#) desc; ------------------------ openRecoveryActionCursor ----------------------------- -- NOTES: -- -- Procedure "setUntilTime" attempts to estimate the untilSCN by querying -- all the scn/timestamp pairs in the recovery catalog whose timestamp is -- strictly less than the until time. The result is an estimated untilSCN -- that is very likely to be less than the actual incomplete recovery SCN -- when the RECOVER UNTIL TIME is done. I.e. the estimated untilSCN is -- conservative. This calculation considers checkpoint SCN/timestamps for -- backup datafiles and datafile copies, as well as their absolute fuzzy -- SCNs/completion timestamps. We treat the completion timestamp as a -- reasonable estimate of the absolute fuzzy time. -- Note that the absolute fuzzy SCN can be equal to the until SCN because -- it is higher than any SCN in the backup. We also allow the checkpoint -- SCN and media recovery fuzzy SCN to be equal to the until SCN. -- Compute Recovery Action is split into 2 cursors. They are distinguished -- by the type of actions (type_con) returned. -- -- 1) rcvRecCursor1_c: Cursor which queries a large table in order to -- fetch all datafile records. The result is sorted by file#. It is opened -- only once for an RMAN command. It returns (offlineRange_act_t, -- full_act_t, incremental_act_t) actions. -- -- 2) rcvRecCursor2_c: Cursor which queries a small table (usually dual). -- It is opened for every file. The result of rcvRecCursor1_c is -- filtered by rcvRecCursor1Filter_c and fed as input to -- rcvRecCursor2_c. It returns (implicitRange_act_t, cleanRange_act_t, -- spanningRange_act_t) actions. -- -- NOTE: All of these cursors must have the same ORDER BY clause. -- -- Record which holds the context of openRecoveryAction cursor TYPE rcvRecCursor_t IS RECORD ( currc1 rcvRec_t, -- current fetched rcvRecCursor1_c data reqfno number, -- requested file number reqcrescn number, -- requested file number's create scn reqpluginSCN number, -- requested file number's plugin scn excludeAction binary_integer -- type of action that is excluded ); rcvRecCursor rcvRecCursor_t; CURSOR rcvRecCursor1_c( rmanCmd IN binary_integer ) RETURN rcvRec_t IS -- Offline Ranges, selected only for current incarnation.
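-- (Background, with illustrative numbers not taken from any catalog: an
-- offline range records that a datafile was offline-clean between
-- offline_scn and online_scn, so recovery needs no redo for that
-- interval. E.g. with offline_scn = 500 and online_scn = 800, a copy of
-- the file checkpointed at SCN 500 plus this offline range record is
-- equivalent to a copy checkpointed at SCN 800.)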
SELECT offlineRangeRec_con_t type_con, offr.offr_key key_con, offr.offr_recid recid_con, offr.offr_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, to_number(null) blocks_con, to_number(null) blockSize_con, to_char(null) deviceType_con, to_date(null) compTime_con, offr.cf_create_time cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, offlineRange_act_t type_act, offr.offline_scn fromSCN_act, offr.online_scn toSCN_act, offr.online_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, offr.file# dfNumber_obj, offr.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, 0 site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, offr.create_scn newDfCreationSCN_obj, offr.online_scn newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM offr, dbinc WHERE (tc_database = TRUE# OR isTranslatedFno(offr.file#) = TRUE#) AND (untilSCN is null OR offr.online_scn < untilSCN) -- If online_scn = untilSCN, don't -- apply the offline range. The -- dictionary txn that commits the -- online occurred at a higher scn, -- and recovery will probably stop -- before getting to commit redo. 
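-- For instance (hypothetical numbers):
-- with untilSCN = 800 and an offline
-- range whose online_scn = 800, the
-- dictionary transaction committing the
-- ONLINE ran at some SCN > 800, so
-- recovery stopping at 800 would never
-- see the file come online; hence the
-- strict '<' above.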
AND offr.cf_create_time is not null -- offr recs added before 8.0.3 are useless AND offr.offr_stamp <> 0 -- stamp = 0 -> from kccfe AND offr.dbinc_key = this_dbinc_key AND offr.dbinc_key = dbinc.dbinc_key UNION ALL -- Datafile Copies, may get backup of older incarnations SELECT imageCopy_con_t type_con, cdf.cdf_key key_con, cdf.cdf_recid recid_con, cdf.cdf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, cdf.fname fileName_con, cdf.tag tag_con, to_number(null) copyNumber_con, cdf.status status_con, cdf.blocks blocks_con, cdf.block_size blockSize_con, 'DISK' deviceType_con, cdf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, cdf.ckp_scn toSCN_act, cdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, cdf.incr_level level_act, 0 section_size_act, cdf.file# dfNumber_obj, cdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, cdf.keep_options keep_options, cdf.keep_until keep_until, cdf.abs_fuzzy_scn afzSCN_act, cdf.rcv_fuzzy_time rfzTime_act, cdf.rcv_fuzzy_scn rfzSCN_act, to_char(null) media_con, cdf.is_recovery_dest_file isrdf_con, site_key site_key_con, cdf.foreign_dbid foreignDbid_obj, decode(cdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, cdf.plugin_scn pluginSCN_obj, cdf.plugin_reset_scn pluginRlgSCN_obj, cdf.plugin_reset_time pluginRlgTime_obj, decode(cdf.plugin_scn, 0, cdf.create_scn, cdf.plugin_scn) newDfCreationSCN_obj, decode(cdf.plugged_readonly, 'NO', cdf.ckp_scn, cdf.plugin_scn) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM cdf, dbinc WHERE (tc_database = TRUE# OR isTranslatedFno(cdf.file#) = TRUE#) AND (untilSCN is null OR (cdf.plugged_readonly = 'NO' AND greatest(cdf.ckp_scn, cdf.abs_fuzzy_scn, cdf.rcv_fuzzy_scn) <= untilSCN) OR (cdf.plugged_readonly = 'YES' AND cdf.plugin_scn <= untilSCN)) AND cdf.status = 'A' AND (restoreSource is NULL OR bitand(restoreSource, imageCopy_con_t) != 0) AND cdf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key AND rmanCmd != recoverCmd_t AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL -- Controlfile Copies, select only for current incarnation SELECT imageCopy_con_t type_con, ccf.ccf_key key_con, ccf.ccf_recid recid_con, ccf.ccf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, ccf.fname fileName_con, ccf.tag tag_con, to_number(null) copyNumber_con, ccf.status status_con, to_number(null) blocks_con, ccf.block_size blockSize_con, 'DISK'
deviceType_con, ccf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, ccf.ckp_scn toSCN_act, ccf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(ccf.controlfile_type, 'B') cfType_obj, ccf.keep_options keep_options, ccf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, is_recovery_dest_file isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, 0 newDfCreationSCN_obj, ccf.ckp_scn newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM ccf, dbinc WHERE (tc_database = TRUE# OR isTranslatedFno(0) = TRUE#) AND (untilSCN is null OR ccf.ckp_scn <= untilSCN) AND ccf.status = 'A' AND (restoreSource is NULL OR bitand(restoreSource, imageCopy_con_t) != 0) AND ccf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = this_db_key AND rmanCmd NOT IN (recoverCmd_t, blkRestoreCmd_t) AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))))) UNION ALL -- Backup Sets(for non-report obsolete command), may select all -- incarnation backups SELECT backupSet_con_t type_con, bdf.bdf_key key_con, bdf.bdf_recid recid_con, bdf.bdf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, bdf.blocks blocks_con, bdf.block_size blockSize_con, to_char(null) deviceType_con, bdf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, decode(bdf.incr_scn, 0, full_act_t, bdf.create_scn, decode(rmanCmd, rcvCopyCmd_t, incremental_act_t, recoverCmd_t, incremental_act_t, full_act_t), incremental_act_t) type_act, decode(bdf.incr_scn, bdf.create_scn, decode(rmanCmd, rcvCopyCmd_t, bdf.incr_scn, recoverCmd_t, bdf.incr_scn, 0), bdf.incr_scn) fromSCN_act, bdf.ckp_scn toSCN_act, bdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, bdf.incr_level level_act, bdf.section_size section_size_act, bdf.file# dfNumber_obj, bdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, 
to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, bdf.abs_fuzzy_scn afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, bdf.foreign_dbid foreignDbid_obj, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, bdf.plugin_scn pluginSCN_obj, bdf.plugin_reset_scn pluginRlgSCN_obj, bdf.plugin_reset_time pluginRlgTime_obj, decode(bdf.plugin_scn, 0, bdf.create_scn, bdf.plugin_scn) newDfCreationSCN_obj, decode(bdf.plugged_readonly, 'NO', bdf.ckp_scn, bdf.plugin_scn) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bdf, bs, dbinc -- NOTE!! NOTE!! NOTE!! -- If you add/change conditional clause, then make sure you do the -- same change in 'Backup sets (for report obsolete cmd)' query. WHERE rmanCmd != obsoleteCmd_t AND (tc_database = TRUE# OR isTranslatedFno(bdf.file#) = TRUE#) AND bs.status = 'A' AND bs.bck_type != 'L' -- only datafile backups AND (untilSCN IS NULL OR (bdf.plugged_readonly = 'NO' AND greatest(bdf.ckp_scn, bdf.abs_fuzzy_scn) <= untilSCN) OR (bdf.plugged_readonly = 'YES' AND bdf.plugin_scn <= untilSCN)) AND dbinc.dbinc_key = bdf.dbinc_key -- join dbinc, bdf AND bs.bs_key = bdf.bs_key -- join bs, bdf AND dbinc.db_key = this_db_key AND (rmanCmd = rcvCopyCmd_t OR -- use incr for recover copy cmd rmanCmd = recoverCmd_t OR (restoreSource IS NULL OR bitand(restoreSource, backupSet_con_t) != 0)) AND ((rmanCmd = rcvCopyCmd_t AND bdf.incr_scn > 0) OR (rmanCmd = recoverCmd_t AND bdf.incr_scn > 0) OR (rmanCmd IN (restoreCmd_t, blkRestoreCmd_t) AND bdf.incr_scn <= bdf.create_scn) OR (rmanCmd = unknownCmd_t)) AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) UNION ALL -- Backup Sets(for report obsolete command), may select all incarnation -- backups SELECT backupSet_con_t type_con, bdf.bdf_key key_con, bdf.bdf_recid recid_con, bdf.bdf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, bs.incr_level bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, bdf.blocks blocks_con, bdf.block_size blockSize_con, to_char(null) deviceType_con, bdf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, decode(bdf.incr_scn, 0, full_act_t, bdf.create_scn, full_act_t, incremental_act_t) type_act, bdf.incr_scn fromSCN_act, bdf.ckp_scn toSCN_act, bdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, bdf.incr_level level_act, bdf.section_size section_size_act, bdf.file# dfNumber_obj, bdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, 
to_char(null) logTerminal_obj, to_char(null) cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, bdf.abs_fuzzy_scn afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, bdf.foreign_dbid foreignDbid_obj, decode(bdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, bdf.plugin_scn pluginSCN_obj, bdf.plugin_reset_scn pluginRlgSCN_obj, bdf.plugin_reset_time pluginRlgTime_obj, decode(bdf.plugin_scn, 0, bdf.create_scn, bdf.plugin_scn) newDfCreationSCN_obj, decode(bdf.plugged_readonly, 'NO', bdf.ckp_scn, bdf.plugin_scn) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bdf, bs, dbinc, (SELECT bs_key, count(distinct piece#) pieces FROM bp WHERE rmanCmd = obsoleteCmd_t AND bp.db_key = this_db_key -- this database AND bp.status = 'A' AND (anyDevice = TRUE# OR isDeviceTypeAllocated(bp.device_type) = TRUE#) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared =TRUE# AND bp.device_type='DISK') OR (tape_backups_shared =TRUE# AND bp.device_type<>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs_key, device_type) bp -- NOTE!! NOTE!! NOTE!! -- If you add/change conditional clause, then make sure you do the -- same change in 'Backup Sets (for non-report obsolete cmd)' query. WHERE rmanCmd = obsoleteCmd_t -- For sure, istranslatedFno is true for obsoleteCmd_t. Then, why -- call it unnecessary? -- AND (tc_database = TRUE# OR isTranslatedFno(bdf.file#) = TRUE#) AND bs.status = 'A' AND bs.bck_type != 'L' -- only datafile backups AND (untilSCN IS NULL OR (bdf.plugged_readonly = 'NO' AND greatest(bdf.ckp_scn, bdf.abs_fuzzy_scn) <= untilSCN) OR (bdf.plugged_readonly = 'YES' AND bdf.plugin_scn <= untilSCN)) AND dbinc.dbinc_key = bdf.dbinc_key -- join dbinc, bdf AND bs.bs_key = bdf.bs_key -- join bs, bdf AND dbinc.db_key = this_db_key AND (restoreSource IS NULL OR bitand(restoreSource, backupSet_con_t) != 0) AND bp.bs_key = bs.bs_key AND bp.pieces = bs.pieces UNION ALL -- Backup Sets (for non-report obsolete command) with controlfiles, -- select only for current incarnation SELECT backupSet_con_t type_con, bcf.bcf_key key_con, bcf.bcf_recid recid_con, bcf.bcf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, 0 bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, to_number(null) blocks_con, bcf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, 0 fromSCN_act, bcf.ckp_scn toSCN_act, bcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, 
nvl(bcf.controlfile_type, 'B') cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, 0 newDfCreationSCN_obj, bcf.ckp_scn newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bcf, bs, dbinc -- NOTE!! NOTE!! NOTE!! -- If you add/change conditional clause, then make sure you do the -- same change in 'Backup sets (for report obsolete cmd)' query. WHERE rmanCmd != obsoleteCmd_t AND (tc_database = TRUE# OR isTranslatedFno(0) = TRUE#) AND bs.status = 'A' AND bs.bck_type != 'L' -- ignore archivelog backups AND (untilSCN IS NULL OR bcf.ckp_scn <= untilSCN) AND (restoreSource IS NULL OR bitand(restoreSource, backupSet_con_t) != 0) AND dbinc.dbinc_key = bcf.dbinc_key -- join dbinc, bcf AND bs.bs_key = bcf.bs_key -- join bs, bcf AND dbinc.db_key = this_db_key AND rmanCmd NOT IN (recoverCmd_t, blkRestoreCmd_t) AND (bs.site_key IS NULL OR -- always return null site_key user_site_key = bs.site_key OR -- user interested in one site (user_site_key IS NULL AND -- return rows per access attr (disk_backups_shared = TRUE# OR tape_backups_shared = TRUE# OR this_site_key = bs.site_key))) UNION ALL -- Backup Sets (for report obsolete command) with controlfiles, -- select only for current incarnation SELECT backupSet_con_t type_con, bcf.bcf_key key_con, bcf.bcf_recid recid_con, bcf.bcf_stamp stamp_con, bs.set_stamp setStamp_con, bs.set_count setCount_con, bs.bs_recid bsRecid_con, bs.bs_stamp bsStamp_con, bs.bs_key bsKey_con, 0 bsLevel_con, bs.bck_type bsType_con, abs((bs.completion_time - bs.start_time) * 86400) elapseSecs_con, bs.pieces pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, to_number(null) blocks_con, bcf.block_size blockSize_con, to_char(null) deviceType_con, bs.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, multi_section multi_section_con, full_act_t type_act, 0 fromSCN_act, bcf.ckp_scn toSCN_act, bcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(bcf.controlfile_type, 'B') cfType_obj, bs.keep_options keep_options, bs.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, bs.site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, 0 newDfCreationSCN_obj, bcf.ckp_scn newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM bcf, bs, dbinc, (SELECT bs_key, count(distinct piece#) pieces FROM bp WHERE rmanCmd = obsoleteCmd_t AND bp.db_key = this_db_key -- this database AND bp.status = 'A' AND 
(anyDevice = TRUE# OR isDeviceTypeAllocated(bp.device_type) = TRUE#) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared =TRUE# AND bp.device_type ='DISK') OR (tape_backups_shared =TRUE# AND bp.device_type<>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) GROUP BY bs_key, device_type) bp -- NOTE!! NOTE!! NOTE!! -- If you add/change conditional clause, then make sure you do the -- same change in 'Backup Sets (for non-report obsolete cmd)' query. WHERE rmanCmd = obsoleteCmd_t -- For sure, istranslatedFno is true for obsoleteCmd_t. Then, why -- call it unnecessary? -- AND (tc_database = TRUE# OR isTranslatedFno(0) = TRUE#) AND bs.status = 'A' AND bs.bck_type != 'L' -- ignore archivelog backups AND (untilSCN IS NULL OR bcf.ckp_scn <= untilSCN) AND (restoreSource IS NULL OR bitand(restoreSource, backupSet_con_t) != 0) AND dbinc.dbinc_key = bcf.dbinc_key -- join dbinc, bcf AND bs.bs_key = bcf.bs_key -- join bs, bcf AND dbinc.db_key = this_db_key AND bs.bs_key = bp.bs_key -- join bp, bs AND bp.pieces = bs.pieces UNION ALL -- Proxy Datafile Backups SELECT proxyCopy_con_t type_con, xdf.xdf_key key_con, xdf.xdf_recid recid_con, xdf.xdf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, xdf.handle fileName_con, xdf.tag tag_con, to_number(null) copyNumber_con, xdf.status status_con, xdf.blocks blocks_con, xdf.block_size blockSize_con, xdf.device_type deviceType_con, xdf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xdf.ckp_scn toSCN_act, xdf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, xdf.file# dfNumber_obj, xdf.create_scn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, keep_options keep_options, keep_until keep_until, xdf.abs_fuzzy_scn afzSCN_act, xdf.rcv_fuzzy_time rfzTime_act, xdf.rcv_fuzzy_scn rfzSCN_act, xdf.media media_con, 'NO' isrdf_con, site_key site_key_con, xdf.foreign_dbid foreignDbid_obj, decode(xdf.plugged_readonly, 'YES', 1, 0) pluggedRonly_obj, xdf.plugin_scn pluginSCN_obj, xdf.plugin_reset_scn pluginRlgSCN_obj, xdf.plugin_reset_time pluginRlgTime_obj, decode(xdf.plugin_scn, 0, xdf.create_scn, xdf.plugin_scn) newDfCreationSCN_obj, decode(xdf.plugged_readonly, 'NO', xdf.ckp_scn, xdf.plugin_scn) newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xdf, dbinc WHERE (tc_database = TRUE# OR isTranslatedFno(xdf.file#) = TRUE#) AND (untilSCN IS NULL OR (xdf.plugged_readonly = 'NO' AND greatest(xdf.ckp_scn, xdf.abs_fuzzy_scn, xdf.rcv_fuzzy_scn) <= untilSCN) OR (xdf.plugged_readonly = 'YES' AND xdf.plugin_scn <= untilSCN)) AND xdf.status = 'A' AND (restoreSource is NULL OR bitand(restoreSource, proxyCopy_con_t) != 0) AND dbinc.db_key = 
this_db_key AND xdf.dbinc_key = dbinc.dbinc_key AND rmanCmd NOT IN (recoverCmd_t, blkRestoreCmd_t) AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) UNION ALL -- Proxy Controlfile Backups, select only for current incarnation SELECT proxyCopy_con_t type_con, xcf.xcf_key key_con, xcf.xcf_recid recid_con, xcf.xcf_stamp stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, xcf.handle fileName_con, xcf.tag tag_con, to_number(null) copyNumber_con, xcf.status status_con, to_number(null) blocks_con, xcf.block_size blockSize_con, xcf.device_type deviceType_con, xcf.completion_time compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, full_act_t type_act, 0 fromSCN_act, xcf.ckp_scn toSCN_act, xcf.ckp_time toTime_act, dbinc.reset_scn rlgSCN_act, dbinc.reset_time rlgTime_act, dbinc.dbinc_key dbincKey_act, to_number(null) level_act, 0 section_size_act, 0 dfNumber_obj, 0 dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, nvl(xcf.controlfile_type, 'B') cfType_obj, xcf.keep_options keep_options, xcf.keep_until keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, xcf.media media_con, 'NO' isrdf_con, site_key site_key_con, 0 foreignDbid_obj, 0 pluggedRonly_obj, 0 pluginSCN_obj, 0 pluginRlgSCN_obj, to_date(null) pluginRlgTime_obj, 0 newDfCreationSCN_obj, xcf.ckp_scn newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM xcf, dbinc WHERE (tc_database = TRUE# OR isTranslatedFno(0) = TRUE#) AND (untilSCN IS NULL OR xcf.ckp_scn <= untilSCN) AND xcf.status = 'A' AND (restoreSource is NULL OR bitand(restoreSource, proxyCopy_con_t) != 0) AND dbinc.db_key = this_db_key AND xcf.dbinc_key = dbinc.dbinc_key AND rmanCmd NOT IN (recoverCmd_t, blkRestoreCmd_t) AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))))) ORDER BY dfNumber_obj asc, newDfCreationSCN_obj asc, newToSCN_act desc, fromSCN_act asc, type_con asc, stamp_con desc; -- Filter result of rcvRecCursor1_c CURSOR rcvRecCursor1Filter_c( dbincKey IN number ,fno IN number ,creSCN IN number ,dfCkpSCN IN number ,dbincRlgSCN IN number ,dbincRlgTime IN date ,offlSCN IN number ,onlSCN IN number ,onlTime IN date ,cleanSCN IN number ,clean2SCN IN number ,clean2Time IN date ,targetSCN IN number ,c1rec IN rcvRec_t ,foreignDbid IN number ,pluggedRonly IN binary_integer ,pluginSCN IN number ,pluginRlgSCN IN number ,pluginRlgTime IN date ,rmanCmd IN binary_integer) RETURN rcvRec_t IS -- Offline Ranges SELECT c1rec.type_con type_con, c1rec.key_con key_con, c1rec.recid_con recid_con, c1rec.stamp_con stamp_con, c1rec.setStamp_con setStamp_con, c1rec.setCount_con setCount_con, c1rec.bsRecid_con bsRecid_con, c1rec.bsStamp_con bsStamp_con, 
c1rec.bsKey_con bsKey_con, c1rec.bsLevel_con bsLevel_con, c1rec.bsType_con bsType_con, c1rec.elapseSecs_con elapseSecs_con, c1rec.pieceCount_con pieceCount_con, c1rec.fileName_con fileName_con, c1rec.tag_con tag_con, c1rec.copyNumber_con copyNumber_con, c1rec.status_con status_con, c1rec.blocks_con blocks_con, c1rec.blockSize_con blockSize_con, c1rec.deviceType_con deviceType_con, c1rec.compTime_con compTime_con, c1rec.cfCreationTime_con cfCreationTime_con, c1rec.pieceNumber_con pieceNumber_con, c1rec.bpCompTime_con bpCompTime_con, c1rec.bpCompressed_con bpCompressed_con, c1rec.multi_section_con multi_section_con, c1rec.type_act type_act, c1rec.fromSCN_act fromSCN_act, c1rec.toSCN_act toSCN_act, c1rec.toTime_act toTime_act, c1rec.rlgSCN_act rlgSCN_act, c1rec.rlgTime_act rlgTime_act, c1rec.dbincKey_act dbincKey_act, c1rec.level_act level_act, c1rec.section_size_act section_size_act, c1rec.dfNumber_obj dfNumber_obj, c1rec.dfCreationSCN_obj dfCreationSCN_obj, c1rec.cfSequence_obj cfSequence_obj, c1rec.cfDate_obj cfDate_obj, c1rec.logSequence_obj logSequence_obj, c1rec.logThread_obj logThread_obj, c1rec.logRlgSCN_obj logRlgSCN_obj, c1rec.logRlgTime_obj logRlgTime_obj, c1rec.logLowSCN_obj logLowSCN_obj, c1rec.logLowTime_obj logLowTime_obj, c1rec.logNextSCN_obj logNextSCN_obj, c1rec.logNextTime_obj logNextTime_obj, c1rec.logTerminal_obj logTerminal_obj, c1rec.cfType_obj cfType_obj, c1rec.keep_options keep_options, c1rec.keep_until keep_until, c1rec.afzSCN_act afzSCN_act, c1rec.rfzTime_act rfzTime_act, c1rec.rfzSCN_act rfzSCN_act, c1rec.media_con media_con, c1rec.isrdf_con isrdf_con, c1rec.site_key_con site_key_con, c1rec.foreignDbid_obj foreignDbid_obj, c1rec.pluggedRonly_obj pluggedRonly_obj, c1rec.pluginSCN_obj pluginSCN_obj, c1rec.pluginRlgSCN_obj pluginRlgSCN_obj, c1rec.pluginRlgTime_obj pluginRlgTime_obj, c1rec.newDfCreationSCN_obj newDfCreationSCN_obj, c1rec.newToSCN_act newToSCN_act, c1rec.newRlgSCN_act newRlgSCN_act, c1rec.newRlgTime_act newRlgTime_act, c1rec.sfDbUniqueName_obj sfDbUniqueName_obj FROM dual WHERE c1rec.type_con = offlineRangeRec_con_t AND rcvRecCursor1Filter_c.pluggedRonly = 0 AND c1rec.dbincKey_act = rcvRecCursor1Filter_c.dbincKey AND (rcvRecCursor1Filter_c.dfCkpSCN is null OR rcvRecCursor1Filter_c.dfCkpSCN <= c1rec.fromSCN_act) AND (rcvRecCursor1Filter_c.targetSCN is null OR c1rec.toSCN_act <= rcvRecCursor1Filter_c.targetSCN) UNION ALL -- Datafile copies SELECT c1rec.type_con type_con, c1rec.key_con key_con, c1rec.recid_con recid_con, c1rec.stamp_con stamp_con, c1rec.setStamp_con setStamp_con, c1rec.setCount_con setCount_con, c1rec.bsRecid_con bsRecid_con, c1rec.bsStamp_con bsStamp_con, c1rec.bsKey_con bsKey_con, c1rec.bsLevel_con bsLevel_con, c1rec.bsType_con bsType_con, c1rec.elapseSecs_con elapseSecs_con, c1rec.pieceCount_con pieceCount_con, c1rec.fileName_con fileName_con, c1rec.tag_con tag_con, c1rec.copyNumber_con copyNumber_con, c1rec.status_con status_con, c1rec.blocks_con blocks_con, c1rec.blockSize_con blockSize_con, c1rec.deviceType_con deviceType_con, c1rec.compTime_con compTime_con, c1rec.cfCreationTime_con cfCreationTime_con, c1rec.pieceNumber_con pieceNumber_con, c1rec.bpCompTime_con bpCompTime_con, c1rec.bpCompressed_con bpCompressed_con, c1rec.multi_section_con multi_section_con, c1rec.type_act type_act, c1rec.fromSCN_act fromSCN_act, c1rec.toSCN_act toSCN_act, c1rec.toTime_act toTime_act, c1rec.rlgSCN_act rlgSCN_act, c1rec.rlgTime_act rlgTime_act, c1rec.dbincKey_act dbincKey_act, c1rec.level_act level_act, c1rec.section_size_act section_size_act, 
c1rec.dfNumber_obj dfNumber_obj, c1rec.dfCreationSCN_obj dfCreationSCN_obj, c1rec.cfSequence_obj cfSequence_obj, c1rec.cfDate_obj cfDate_obj, c1rec.logSequence_obj logSequence_obj, c1rec.logThread_obj logThread_obj, c1rec.logRlgSCN_obj logRlgSCN_obj, c1rec.logRlgTime_obj logRlgTime_obj, c1rec.logLowSCN_obj logLowSCN_obj, c1rec.logLowTime_obj logLowTime_obj, c1rec.logNextSCN_obj logNextSCN_obj, c1rec.logNextTime_obj logNextTime_obj, c1rec.logTerminal_obj logTerminal_obj, c1rec.cfType_obj cfType_obj, c1rec.keep_options keep_options, c1rec.keep_until keep_until, c1rec.afzSCN_act afzSCN_act, c1rec.rfzTime_act rfzTime_act, c1rec.rfzSCN_act rfzSCN_act, c1rec.media_con media_con, c1rec.isrdf_con isrdf_con, c1rec.site_key_con site_key_con, c1rec.foreignDbid_obj foreignDbid_obj, c1rec.pluggedRonly_obj pluggedRonly_obj, c1rec.pluginSCN_obj pluginSCN_obj, c1rec.pluginRlgSCN_obj pluginRlgSCN_obj, c1rec.pluginRlgTime_obj pluginRlgTime_obj, c1rec.newDfCreationSCN_obj newDfCreationSCN_obj, c1rec.newToSCN_act newToSCN_act, c1rec.newRlgSCN_act newRlgSCN_act, c1rec.newRlgTime_act newRlgTime_act, c1rec.sfDbUniqueName_obj sfDbUniqueName_obj FROM dual WHERE c1rec.type_con = imageCopy_con_t AND ((canApplyAnyRedo = TRUE# AND c1rec.dfNumber_obj <> 0) OR (craGetAllCfBackups = TRUE# AND c1rec.dfNumber_obj = 0) OR (c1rec.pluggedRonly_obj = 0 AND c1rec.dbincKey_act = rcvRecCursor1Filter_c.dbincKey) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginRlgSCN_obj = rcvRecCursor1Filter_c.dbincRlgSCN AND c1rec.pluginRlgTime_obj = rcvRecCursor1Filter_c.dbincRlgTime)) AND (rcvRecCursor1Filter_c.dfCkpSCN is null OR rmanCmd = blkRestoreCmd_t OR rcvRecCursor1Filter_c.dfCkpSCN <= c1rec.toSCN_act) AND (rcvRecCursor1Filter_c.targetSCN is null OR (c1rec.pluggedRonly_obj = 0 AND c1rec.toSCN_act <= rcvRecCursor1Filter_c.targetSCN) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginSCN_obj <= rcvRecCursor1Filter_c.targetSCN)) AND (restoreTag is NULL OR c1rec.tag_con = restoreTag OR computeRA_allRecords = TRUE#) AND ((c1rec.foreignDbid_obj = rcvRecCursor1Filter_c.foreignDbid) OR (c1rec.foreignDbid_obj = 0 AND c1rec.pluginSCN_obj = 0 AND rcvRecCursor1Filter_c.pluginSCN = 0)) -- pre-11 plugin files does not have -- plugin scn after it is made read-write -- but has foreign dbid UNION ALL -- Backup Sets SELECT c1rec.type_con type_con, c1rec.key_con key_con, c1rec.recid_con recid_con, c1rec.stamp_con stamp_con, c1rec.setStamp_con setStamp_con, c1rec.setCount_con setCount_con, c1rec.bsRecid_con bsRecid_con, c1rec.bsStamp_con bsStamp_con, c1rec.bsKey_con bsKey_con, c1rec.bsLevel_con bsLevel_con, c1rec.bsType_con bsType_con, c1rec.elapseSecs_con elapseSecs_con, c1rec.pieceCount_con pieceCount_con, c1rec.fileName_con fileName_con, c1rec.tag_con tag_con, c1rec.copyNumber_con copyNumber_con, c1rec.status_con status_con, c1rec.blocks_con blocks_con, c1rec.blockSize_con blockSize_con, c1rec.deviceType_con deviceType_con, c1rec.compTime_con compTime_con, c1rec.cfCreationTime_con cfCreationTime_con, c1rec.pieceNumber_con pieceNumber_con, c1rec.bpCompTime_con bpCompTime_con, c1rec.bpCompressed_con bpCompressed_con, c1rec.multi_section_con multi_section_con, c1rec.type_act type_act, c1rec.fromSCN_act fromSCN_act, c1rec.toSCN_act toSCN_act, c1rec.toTime_act toTime_act, c1rec.rlgSCN_act rlgSCN_act, c1rec.rlgTime_act rlgTime_act, c1rec.dbincKey_act dbincKey_act, c1rec.level_act level_act, c1rec.section_size_act section_size_act, c1rec.dfNumber_obj dfNumber_obj, c1rec.dfCreationSCN_obj dfCreationSCN_obj, c1rec.cfSequence_obj cfSequence_obj, c1rec.cfDate_obj 
cfDate_obj, c1rec.logSequence_obj logSequence_obj, c1rec.logThread_obj logThread_obj, c1rec.logRlgSCN_obj logRlgSCN_obj, c1rec.logRlgTime_obj logRlgTime_obj, c1rec.logLowSCN_obj logLowSCN_obj, c1rec.logLowTime_obj logLowTime_obj, c1rec.logNextSCN_obj logNextSCN_obj, c1rec.logNextTime_obj logNextTime_obj, c1rec.logTerminal_obj logTerminal_obj, c1rec.cfType_obj cfType_obj, c1rec.keep_options keep_options, c1rec.keep_until keep_until, c1rec.afzSCN_act afzSCN_act, c1rec.rfzTime_act rfzTime_act, c1rec.rfzSCN_act rfzSCN_act, c1rec.media_con media_con, c1rec.isrdf_con isrdf_con, c1rec.site_key_con site_key_con, c1rec.foreignDbid_obj foreignDbid_obj, c1rec.pluggedRonly_obj pluggedRonly_obj, c1rec.pluginSCN_obj pluginSCN_obj, c1rec.pluginRlgSCN_obj pluginRlgSCN_obj, c1rec.pluginRlgTime_obj pluginRlgTime_obj, c1rec.newDfCreationSCN_obj newDfCreationSCN_obj, c1rec.newToSCN_act newToSCN_act, c1rec.newRlgSCN_act newRlgSCN_act, c1rec.newRlgTime_act newRlgTime_act, c1rec.sfDbUniqueName_obj sfDbUniqueName_obj FROM dual WHERE c1rec.type_con = backupSet_con_t AND ((canApplyAnyRedo = TRUE# AND c1rec.dfNumber_obj <> 0) OR (craGetAllCfBackups = TRUE# AND c1rec.dfNumber_obj = 0) OR (c1rec.pluggedRonly_obj = 0 AND c1rec.dbincKey_act = rcvRecCursor1Filter_c.dbincKey) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginRlgSCN_obj = rcvRecCursor1Filter_c.dbincRlgSCN AND c1rec.pluginRlgTime_obj = rcvRecCursor1Filter_c.dbincRlgTime)) AND (rcvRecCursor1Filter_c.dfCkpSCN IS NULL OR rmanCmd = blkRestoreCmd_t OR rcvRecCursor1Filter_c.dfCkpSCN < c1rec.toSCN_act) AND (rcvRecCursor1Filter_c.targetSCN IS NULL OR (c1rec.pluggedRonly_obj = 0 AND c1rec.toSCN_act <= rcvRecCursor1Filter_c.targetSCN) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginSCN_obj <= rcvRecCursor1Filter_c.targetSCN)) AND ((c1rec.foreignDbid_obj = rcvRecCursor1Filter_c.foreignDbid) OR (c1rec.foreignDbid_obj = 0 AND c1rec.pluginSCN_obj = 0 AND rcvRecCursor1Filter_c.pluginSCN = 0)) -- pre-11 plugin files does not have -- plugin scn after it is made read-write -- but has foreign dbid UNION ALL -- Proxy Datafile Backups SELECT c1rec.type_con type_con, c1rec.key_con key_con, c1rec.recid_con recid_con, c1rec.stamp_con stamp_con, c1rec.setStamp_con setStamp_con, c1rec.setCount_con setCount_con, c1rec.bsRecid_con bsRecid_con, c1rec.bsStamp_con bsStamp_con, c1rec.bsKey_con bsKey_con, c1rec.bsLevel_con bsLevel_con, c1rec.bsType_con bsType_con, c1rec.elapseSecs_con elapseSecs_con, c1rec.pieceCount_con pieceCount_con, c1rec.fileName_con fileName_con, c1rec.tag_con tag_con, c1rec.copyNumber_con copyNumber_con, c1rec.status_con status_con, c1rec.blocks_con blocks_con, c1rec.blockSize_con blockSize_con, c1rec.deviceType_con deviceType_con, c1rec.compTime_con compTime_con, c1rec.cfCreationTime_con cfCreationTime_con, c1rec.pieceNumber_con pieceNumber_con, c1rec.bpCompTime_con bpCompTime_con, c1rec.bpCompressed_con bpCompressed_con, c1rec.multi_section_con multi_section_con, c1rec.type_act type_act, c1rec.fromSCN_act fromSCN_act, c1rec.toSCN_act toSCN_act, c1rec.toTime_act toTime_act, c1rec.rlgSCN_act rlgSCN_act, c1rec.rlgTime_act rlgTime_act, c1rec.dbincKey_act dbincKey_act, c1rec.level_act level_act, c1rec.section_size_act section_size_act, c1rec.dfNumber_obj dfNumber_obj, c1rec.dfCreationSCN_obj dfCreationSCN_obj, c1rec.cfSequence_obj cfSequence_obj, c1rec.cfDate_obj cfDate_obj, c1rec.logSequence_obj logSequence_obj, c1rec.logThread_obj logThread_obj, c1rec.logRlgSCN_obj logRlgSCN_obj, c1rec.logRlgTime_obj logRlgTime_obj, c1rec.logLowSCN_obj logLowSCN_obj, 
c1rec.logLowTime_obj logLowTime_obj, c1rec.logNextSCN_obj logNextSCN_obj, c1rec.logNextTime_obj logNextTime_obj, c1rec.logTerminal_obj logTerminal_obj, c1rec.cfType_obj cfType_obj, c1rec.keep_options keep_options, c1rec.keep_until keep_until, c1rec.afzSCN_act afzSCN_act, c1rec.rfzTime_act rfzTime_act, c1rec.rfzSCN_act rfzSCN_act, c1rec.media_con media_con, c1rec.isrdf_con isrdf_con, c1rec.site_key_con site_key_con, c1rec.foreignDbid_obj foreignDbid_obj, c1rec.pluggedRonly_obj pluggedRonly_obj, c1rec.pluginSCN_obj pluginSCN_obj, c1rec.pluginRlgSCN_obj pluginRlgSCN_obj, c1rec.pluginRlgTime_obj pluginRlgTime_obj, c1rec.newDfCreationSCN_obj newDfCreationSCN_obj, c1rec.newToSCN_act newToSCN_act, c1rec.newRlgSCN_act newRlgSCN_act, c1rec.newRlgTime_act newRlgTime_act, c1rec.sfDbUniqueName_obj sfDbUniqueName_obj FROM dual WHERE c1rec.type_con = proxyCopy_con_t AND rmanCmd != blkRestoreCmd_t AND ((canApplyAnyRedo = TRUE# AND c1rec.dfNumber_obj <> 0) OR (craGetAllCfBackups = TRUE# AND c1rec.dfNumber_obj = 0) OR (c1rec.pluggedRonly_obj = 0 AND c1rec.dbincKey_act = rcvRecCursor1Filter_c.dbincKey) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginRlgSCN_obj = rcvRecCursor1Filter_c.dbincRlgSCN AND c1rec.pluginRlgTime_obj = rcvRecCursor1Filter_c.dbincRlgTime)) AND (rcvRecCursor1Filter_c.dfCkpSCN IS NULL OR rcvRecCursor1Filter_c.dfCkpSCN < c1rec.toSCN_act) AND (rcvRecCursor1Filter_c.targetSCN IS NULL OR (c1rec.pluggedRonly_obj = 0 AND c1rec.toSCN_act <= rcvRecCursor1Filter_c.targetSCN) OR (c1rec.pluggedRonly_obj != 0 AND c1rec.pluginSCN_obj <= rcvRecCursor1Filter_c.targetSCN)) AND (restoreTag is NULL OR c1rec.tag_con = restoreTag OR computeRA_allRecords = TRUE#) AND ((c1rec.foreignDbid_obj = rcvRecCursor1Filter_c.foreignDbid) OR (c1rec.foreignDbid_obj = 0 AND c1rec.pluginSCN_obj = 0 AND rcvRecCursor1Filter_c.pluginSCN = 0)); -- pre-11 plugin files does not have -- plugin scn after it is made read-write -- but has foreign dbid CURSOR rcvRecCursor2_c( dbincKey IN number ,fno IN number ,creSCN IN number ,dfCkpSCN IN number ,dbincRlgSCN IN number ,dbincRlgTime IN date ,offlSCN IN number ,onlSCN IN number ,onlTime IN date ,cleanSCN IN number ,clean2SCN IN number ,clean2Time IN date ,targetSCN IN number ,c1frec IN rcvRec_t ,excludeAction IN binary_integer ,foreignDbid IN number ,pluggedRonly IN binary_integer ,pluginSCN IN number ,pluginRlgSCN IN number ,pluginRlgTime IN date) RETURN rcvRec_t IS -- Filtered rcvRecCursor1_c row SELECT c1frec.type_con type_con, c1frec.key_con key_con, c1frec.recid_con recid_con, c1frec.stamp_con stamp_con, c1frec.setStamp_con setStamp_con, c1frec.setCount_con setCount_con, c1frec.bsRecid_con bsRecid_con, c1frec.bsStamp_con bsStamp_con, c1frec.bsKey_con bsKey_con, c1frec.bsLevel_con bsLevel_con, c1frec.bsType_con bsType_con, c1frec.elapseSecs_con elapseSecs_con, c1frec.pieceCount_con pieceCount_con, c1frec.fileName_con fileName_con, c1frec.tag_con tag_con, c1frec.copyNumber_con copyNumber_con, c1frec.status_con status_con, c1frec.blocks_con blocks_con, c1frec.blockSize_con blockSize_con, c1frec.deviceType_con deviceType_con, c1frec.compTime_con compTime_con, c1frec.cfCreationTime_con cfCreationTime_con, c1frec.pieceNumber_con pieceNumber_con, c1frec.bpCompTime_con bpCompTime_con, c1frec.bpCompressed_con bpCompressed_con, c1frec.multi_section_con multi_section_con, c1frec.type_act type_act, c1frec.fromSCN_act fromSCN_act, c1frec.toSCN_act toSCN_act, c1frec.toTime_act toTime_act, c1frec.rlgSCN_act rlgSCN_act, c1frec.rlgTime_act rlgTime_act, c1frec.dbincKey_act dbincKey_act, 
c1frec.level_act level_act, c1frec.section_size_act section_size_act, c1frec.dfNumber_obj dfNumber_obj, c1frec.dfCreationSCN_obj dfCreationSCN_obj, c1frec.cfSequence_obj cfSequence_obj, c1frec.cfDate_obj cfDate_obj, c1frec.logSequence_obj logSequence_obj, c1frec.logThread_obj logThread_obj, c1frec.logRlgSCN_obj logRlgSCN_obj, c1frec.logRlgTime_obj logRlgTime_obj, c1frec.logLowSCN_obj logLowSCN_obj, c1frec.logLowTime_obj logLowTime_obj, c1frec.logNextSCN_obj logNextSCN_obj, c1frec.logNextTime_obj logNextTime_obj, c1frec.logTerminal_obj logTerminal_obj, c1frec.cfType_obj cfType_obj, c1frec.keep_options keep_options, c1frec.keep_until keep_until, c1frec.afzSCN_act afzSCN_act, c1frec.rfzTime_act rfzTime_act, c1frec.rfzSCN_act rfzSCN_act, c1frec.media_con media_con, c1frec.isrdf_con isrdf_con, c1frec.site_key_con site_key_con, c1frec.foreignDbid_obj foreignDbid_obj, c1frec.pluggedRonly_obj pluggedRonly_obj, c1frec.pluginSCN_obj pluginSCN_obj, c1frec.pluginRlgSCN_obj pluginRlgSCN_obj, c1frec.pluginRlgTime_obj pluginRlgTime_obj, c1frec.newDfCreationSCN_obj newDfCreationSCN_obj, c1frec.newToSCN_act newToSCN_act, c1frec.newRlgSCN_act newRlgSCN_act, c1frec.newRlgTime_act newRlgTime_act, c1frec.sfDbUniqueName_obj sfDbUniqueName_obj FROM dual WHERE c1frec.type_con is not null UNION ALL -- Implicit offline Ranges, range obtained from controlfile only if recovery -- target destination is same as the one that was last opened. SELECT offlineRangeRec_con_t type_con, to_number(null) key_con, to_number(null) recid_con, to_number(null) stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, to_number(null) blocks_con, to_number(null) blockSize_con, to_char(null) deviceType_con, to_date(null) compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, implicitRange_act_t type_act, rcvRecCursor2_c.offlSCN fromSCN_act, rcvRecCursor2_c.onlSCN toSCN_act, rcvRecCursor2_c.onlTime toTime_act, rcvRecCursor2_c.dbincRlgSCN rlgSCN_act, rcvRecCursor2_c.dbincRlgTime rlgTime_act, rcvRecCursor2_c.dbincKey dbincKey_act, to_number(null) level_act, 0 section_size_act, fno dfNumber_obj, crescn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, 0 site_key_con, rcvRecCursor2_c.foreignDbid foreignDbid_obj, rcvRecCursor2_c.pluggedRonly pluggedRonly_obj, rcvRecCursor2_c.pluginSCN pluginSCN_obj, rcvRecCursor2_c.pluginRlgSCN pluginRlgSCN_obj, rcvRecCursor2_c.pluginRlgTime pluginRlgTime_obj, crescn newDfCreationSCN_obj, rcvRecCursor2_c.onlSCN newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM dual WHERE 
bitand(rcvRecCursor2_c.excludeAction,implicitRange_act_t) = 0 AND rcvRecCursor2_c.pluggedRonly = 0 AND offlSCN <> 0 AND (rcvRecCursor2_c.dfCkpSCN is null OR rcvRecCursor2_c.dfCkpSCN <= rcvRecCursor2_c.offlSCN) AND (rcvRecCursor2_c.onlSCN >= -- belongs to this incarnation rcvRecCursor2_c.dbincRlgSCN) AND (rcvRecCursor2_c.targetSCN is null OR rcvRecCursor2_c.onlscn <= -- don't advance ckpt beyond rcvRecCursor2_c.targetSCN) -- targetSCN AND (untilSCN is null OR -- don't advance ckpt beyond until scn rcvRecCursor2_c.onlSCN < untilSCN) UNION ALL SELECT offlineRangeRec_con_t type_con, to_number(null) key_con, to_number(null) recid_con, to_number(null) stamp_con, to_number(null) setStamp_con, to_number(null) setCount_con, to_number(null) bsRecid_con, to_number(null) bsStamp_con, to_number(null) bsKey_con, to_number(null) bsLevel_con, to_char(null) bsType_con, to_number(null) elapseSecs_con, to_number(null) pieceCount_con, to_char(null) fileName_con, to_char(null) tag_con, to_number(null) copyNumber_con, to_char(null) status_con, to_number(null) blocks_con, to_number(null) blockSize_con, to_char(null) deviceType_con, to_date(null) compTime_con, to_date(null) cfCreationTime_con, to_number(null) pieceNumber_con, to_date(null) bpCompTime_con, to_char(null) bpCompressed_con, to_char(null) multi_section_con, cleanRange_act_t type_act, rcvRecCursor2_c.cleanSCN fromSCN_act, rcvRecCursor2_c.clean2SCN toSCN_act, rcvRecCursor2_c.clean2Time toTime_act, rcvRecCursor2_c.dbincRlgSCN rlgSCN_act, rcvRecCursor2_c.dbincRlgTime rlgTime_act, rcvRecCursor2_c.dbincKey dbincKey_act, to_number(null) level_act, 0 section_size_act, fno dfNumber_obj, crescn dfCreationSCN_obj, to_number(null) cfSequence_obj, to_date(null) cfDate_obj, to_number(null) logSequence_obj, to_number(null) logThread_obj, to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj, to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj, to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj, to_char(null) logTerminal_obj, to_char(null) cfType_obj, to_number(null) keep_options, to_date(null) keep_until, to_number(null) afzSCN_act, to_date(null) rfzTime_act, to_number(null) rfzSCN_act, to_char(null) media_con, 'NO' isrdf_con, 0 site_key_con, rcvRecCursor2_c.foreignDbid foreignDbid_obj, rcvRecCursor2_c.pluggedRonly pluggedRonly_obj, rcvRecCursor2_c.pluginSCN pluginSCN_obj, rcvRecCursor2_c.pluginRlgSCN pluginRlgSCN_obj, rcvRecCursor2_c.pluginRlgTime pluginRlgTime_obj, crescn newDfCreationSCN_obj, rcvRecCursor2_c.clean2SCN newToSCN_act, to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act, to_char(null) sfDbUniqueName_obj FROM dual WHERE bitand(rcvRecCursor2_c.excludeAction, cleanRange_act_t) = 0 AND rcvRecCursor2_c.pluggedRonly = 0 AND rcvRecCursor2_c.cleanscn <> 0 AND (rcvRecCursor2_c.dfCkpSCN is null OR rcvRecCursor2_c.dfCkpSCN <= rcvRecCursor2_c.cleanscn) AND -- belongs to this incarnation (rcvRecCursor2_c.clean2scn >= rcvRecCursor2_c.dbincRlgSCN) AND -- ignore if starts beyond target (rcvRecCursor2_c.targetscn is null OR rcvRecCursor2_c.cleanscn < rcvRecCursor2_c.targetSCN) AND -- If clean2scn is infinite, then we processed this when scanning -- the current incarnation, (rcvRecCursor2_c.targetSCN is null OR rcvRecCursor2_c.clean2SCN <= rcvRecCursor2_c.targetSCN) AND -- don't advance ckpt beyond until scn, unless we don't know -- where this offline range ends. 
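        -- (281474976710655 is 2**48 - 1, used here as the "infinite" SCN
        -- marking a clean range whose end point is not known.)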
  (untilscn is null OR rcvRecCursor2_c.clean2SCN <= untilSCN OR
   rcvRecCursor2_c.clean2SCN = 281474976710655)
  UNION ALL
  SELECT offlineRangeRec_con_t type_con, to_number(null) key_con,
         to_number(null) recid_con, to_number(null) stamp_con,
         to_number(null) setStamp_con, to_number(null) setCount_con,
         to_number(null) bsRecid_con, to_number(null) bsStamp_con,
         to_number(null) bsKey_con, to_number(null) bsLevel_con,
         to_char(null) bsType_con, to_number(null) elapseSecs_con,
         to_number(null) pieceCount_con, to_char(null) fileName_con,
         to_char(null) tag_con, to_number(null) copyNumber_con,
         to_char(null) status_con, to_number(null) blocks_con,
         to_number(null) blockSize_con, to_char(null) deviceType_con,
         to_date(null) compTime_con, to_date(null) cfCreationTime_con,
         to_number(null) pieceNumber_con, to_date(null) bpCompTime_con,
         to_char(null) bpCompressed_con, to_char(null) multi_section_con,
         spanningRange_act_t type_act, rcvRecCursor2_c.targetSCN fromSCN_act,
         to_number(null) toSCN_act, to_date(null) toTime_act,
         rcvRecCursor2_c.dbincRlgSCN rlgSCN_act,
         rcvRecCursor2_c.dbincRlgTime rlgTime_act,
         rcvRecCursor2_c.dbincKey dbincKey_act, to_number(null) level_act,
         0 section_size_act, fno dfNumber_obj, crescn dfCreationSCN_obj,
         to_number(null) cfSequence_obj, to_date(null) cfDate_obj,
         to_number(null) logSequence_obj, to_number(null) logThread_obj,
         to_number(null) logRlgSCN_obj, to_date(null) logRlgTime_obj,
         to_number(null) logLowSCN_obj, to_date(null) logLowTime_obj,
         to_number(null) logNextSCN_obj, to_date(null) logNextTime_obj,
         to_char(null) logTerminal_obj, to_char(null) cfType_obj,
         to_number(null) keep_options, to_date(null) keep_until,
         to_number(null) afzSCN_act, to_date(null) rfzTime_act,
         to_number(null) rfzSCN_act, to_char(null) media_con,
         'NO' isrdf_con, 0 site_key_con,
         rcvRecCursor2_c.foreignDbid foreignDbid_obj,
         rcvRecCursor2_c.pluggedRonly pluggedRonly_obj,
         rcvRecCursor2_c.pluginSCN pluginSCN_obj,
         rcvRecCursor2_c.pluginRlgSCN pluginRlgSCN_obj,
         rcvRecCursor2_c.pluginRlgTime pluginRlgTime_obj,
         crescn newDfCreationSCN_obj, to_number(null) newToSCN_act,
         to_number(null) newRlgSCN_act, to_date(null) newRlgTime_act,
         to_char(null) sfDbUniqueName_obj
  FROM dual
  WHERE bitand(rcvRecCursor2_c.excludeAction, spanningRange_act_t) = 0
        -- an offline range which spans multiple resetlogs triggers
        -- this condition
    AND rcvRecCursor2_c.pluggedRonly = 0
    AND rcvRecCursor2_c.targetSCN < rcvRecCursor2_c.dbincRlgSCN
  ORDER BY newToSCN_act desc, fromSCN_act desc, type_con asc, stamp_con desc;

-- This cursor returns the list of archived logs that have the given number of
-- backups on a specific device type, ordered by backup completion time.
-- Since 11gR1 there is never a need to call this cursor.
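-- Usage sketch (illustrative only, not part of the package): list the logs
-- that already have at least two complete backups on tape, newest first:
--   FOR al IN sinceLastBackedAL_c('SBT_TAPE', 2) LOOP
--      deb(DEB_PRINT, 'next_scn='||al.next_scn||' nbackups='||al.nbackups);
--   END LOOP;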
CURSOR sinceLastBackedAL_c (devtype IN VARCHAR2, numofbackups IN NUMBER) IS SELECT * FROM ( SELECT low_scn, next_scn, next_time, count(*) over (partition by sequence#, thread#, dbinc_key) nbackups FROM ( SELECT sequence#, thread#, low_scn, next_scn, next_time, dbinc_key FROM brl, (SELECT UNIQUE bs.bs_key, copy#, pieces, count(piece#) over (partition by bs.bs_key, copy#) pieces_count, device_type, bs.completion_time FROM bs, bp WHERE bs.db_key = this_db_key AND bp.status = 'A' AND bs.bck_type = 'L' AND bs.bs_key = bp.bs_key AND (devtype IS NULL OR devtype = device_type) AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared=TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared=TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) ) allbs WHERE brl.bs_key = allbs.bs_key AND allbs.pieces_count = allbs.pieces UNION ALL SELECT sequence#, thread#, first_change#, next_change#, next_time, dbinc_key FROM rc_proxy_archivedlog WHERE db_key = this_db_key AND status = 'A' AND (devtype IS NULL OR devtype = device_type) ) ) WHERE nbackups >= numofbackups ORDER BY next_time DESC; -- This cursor returns a restore point with a matching name or all names CURSOR restore_point_c (name IN VARCHAR2) IS SELECT name, r.dbinc_key dbinc#, scn, creation_time, restore_point_time, guarantee_flashback_database guaranteed, preserved, reset_scn, reset_time FROM rc_restore_point r, dbinc d WHERE d.dbinc_key = r.dbinc_key AND site_key = nvl(user_site_key, this_site_key) AND (name = restore_point_c.name OR restore_point_c.name is null) ORDER BY scn, creation_time; ------------------------------------------------ -- *** PRIVATE FUNCTION/PROCEDURE SECTION *** -- ------------------------------------------------ -- -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -- -- The code between BEGIN_PRVCOMMON_RCVMAN_CODE and END_PRVCOMMON_RCVMAN_CODE -- is included also in the target database version of the RCVMAN package (that -- is prvtrmns.pls). The processing is done by Makefile in sqlexec/fixedpkg. -- -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
-- -- BEGIN_PRVCOMMON_RCVMAN_CODE --------------- -- Debugging -- --------------- ------------------------------------- deb ------------------------------------- PROCEDURE deb( type IN number ,line IN varchar2 DEFAULT NULL ) IS pname varchar2(512); pref varchar2(11) := 'DBGRCVMAN: '; BEGIN IF (not debug) THEN RETURN; END IF; IF type = DEB_ENTER THEN pname := line; dbms_output.put_line(pref||rpad(' ',pname_i)||'ENTERING '||pname); pname_i := pname_i + 1; last_pnames(pname_i) := pname; ELSIF type = DEB_IN THEN dbms_output.put_line(pref||rpad(' ',pname_i+2)||last_pnames(pname_i)|| ' '||line); ELSIF type = DEB_EXIT THEN IF (pname_i >= 1) THEN pname := last_pnames(pname_i); pname_i := pname_i - 1; ELSE pname := DEB_DEF_PNAME; END IF; IF line is not NULL THEN dbms_output.put_line(pref||rpad(' ', pname_i)||'EXITING '||pname|| ' '||line); ELSE dbms_output.put_line(pref||rpad(' ', pname_i)||'EXITING '||pname); END IF; ELSIF type = DEB_OPEN THEN pname := last_pnames(pname_i); dbms_output.put_line(pref||rpad(' ', pname_i)||'OPENING cursor '|| line||' in '||pname); ELSE dbms_output.put_line(pref||line); END IF; EXCEPTION WHEN others THEN dbms_output.put_line('caught exception during deb ' || substr(sqlerrm, 1, 512)); END; FUNCTION bool2char( flag IN boolean) RETURN varchar2 IS BEGIN IF (flag) THEN RETURN 'TRUE'; ELSE RETURN 'FALSE'; END IF; END bool2char; ----------------- -- Translation -- ----------------- ------------------------------ setAlTransClause ------------------------------- PROCEDURE setAlTransClause( thread IN NUMBER DEFAULT NULL ,fromTime IN DATE DEFAULT NULL ,toTime IN DATE DEFAULT NULL ,fromSCN IN NUMBER DEFAULT NULL ,toSCN IN NUMBER DEFAULT NULL ,fromSeq IN NUMBER DEFAULT NULL ,toSeq IN NUMBER DEFAULT NULL ,pattern IN VARCHAR2 DEFAULT NULL) IS BEGIN tc_thread := thread; tc_fromTime := fromTime; tc_toTime := toTime; tc_fromSCN := fromSCN; tc_toSCN := toSCN; tc_fromSeq := fromSeq; tc_toSeq := toSeq; tc_pattern := pattern; deb(DEB_PRINT, 'tc_thread=' || tc_thread); deb(DEB_PRINT, 'tc_fromSCN=' || fromSCN); deb(DEB_PRINT, 'tc_toSCN=' || toSCN); deb(DEB_PRINT, 'tc_fromSeq=' || fromSeq); deb(DEB_PRINT, 'tc_fromTime=' || fromTime); deb(DEB_PRINT, 'tc_toTime=' || toTime); deb(DEB_PRINT, 'tc_toSeq=' || toSeq); deb(DEB_PRINT, 'tc_pattern=' || pattern); END setAlTransClause; ------------------------------ setDfTransClause ------------------------------- PROCEDURE setDfTransClause( fno IN NUMBER) IS BEGIN tc_fno(fno) := TRUE; END setDfTransClause; ------------------------------ setDBTransClause ------------------------------- PROCEDURE setDBTransClause IS BEGIN deb(DEB_PRINT, 'tc_database=TRUE'); tc_database := TRUE#; END setDBTransClause; ------------------------------ resetAlTransClause ----------------------------- PROCEDURE resetAlTransClause IS BEGIN tc_thread := to_number(null); tc_fromTime := to_date(null); tc_toTime := to_date(null); tc_fromSCN := to_number(null); tc_toSCN := to_number(null); tc_fromSeq := to_number(null); tc_toSeq := to_number(null); tc_pattern := to_char(null); currInc := -1; getArchivedLogDoingRecovery := FALSE#; -- clear for next time getArchivedLogOnlyrdf := 0; tc_threadSeq.delete; END resetAlTransClause; ------------------------------ resetDBTransClause ----------------------------- PROCEDURE resetDBTransClause IS BEGIN tc_database := FALSE#; tc_fno.delete; END resetDBTransClause; ------------------------------ resetDbidTransClause --------------------------- PROCEDURE resetDbidTransClause IS BEGIN tc_anydbid := FALSE#; tc_dbid.delete; END resetDBidTransClause; 
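-- Usage sketch (assumed calling pattern, for illustration only): RMAN
-- brackets each translation with a set/reset pair, for example:
--   setDfTransClause(fno => 4);  -- queue datafile 4 for translation
--   ... open and fetch the translation cursor ...
--   resetDBTransClause;          -- clear tc_fno/tc_database for the next call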
--------------------------
-- Database Translation --
--------------------------

-- Private procedure to check if a tablespace should be skipped.

-------------------------------- skipTableSpace -------------------------------

FUNCTION skipTableSpace(
   tsName IN varchar2)
RETURN boolean IS
BEGIN
   deb(DEB_ENTER, 'skipTableSpace');
   FOR i in 1..skipTablespaceCount LOOP
      IF (tsName = skipTablespaceList(i)) THEN
         deb(DEB_EXIT, 'with: TRUE');
         RETURN TRUE;
      END IF;
   END LOOP;
   deb(DEB_EXIT, 'with: FALSE');
   RETURN FALSE;
END;

---------------------------- isDeviceTypeAllocated ----------------------------

FUNCTION isDeviceTypeAllocated(
   deviceType IN varchar2)
RETURN NUMBER IS
BEGIN
   IF (anyDevice = TRUE#) THEN
      RETURN TRUE#;
   END IF;
   FOR i IN 1..deviceCount LOOP
      IF deviceType = deviceList(i) THEN
         RETURN TRUE#;
      END IF;
   END LOOP;
   RETURN FALSE#;
END isDeviceTypeAllocated;

---------------------------
-- Backup Set Validation --
---------------------------

-- Validate that a backup set has all pieces available according to the
-- specified arguments. Returns only 1 validBackupSetRec. There may be other
-- sets of pieces that match the criteria.

---------------------------- computeAvailableMask -----------------------------

FUNCTION computeAvailableMask(
   available     IN number
  ,unavailable   IN number
  ,deleted       IN number
  ,expired       IN number
  ,partial_avail IN number DEFAULT 0)
RETURN binary_integer IS
   rc binary_integer := 0;
BEGIN
   deb(DEB_ENTER, 'computeAvailableMask');
   IF (available > 0) THEN
      rc := rc + dbms_rcvman.BSavailable;
   END IF;
   IF (unavailable > 0) THEN
      rc := rc + dbms_rcvman.BSunavailable;
   END IF;
   IF (deleted > 0) THEN
      rc := rc + dbms_rcvman.BSdeleted;
   END IF;
   IF (expired > 0) THEN
      rc := rc + dbms_rcvman.BSexpired;
   END IF;
   IF (partial_avail > 0) THEN
      rc := rc + dbms_rcvman.BSpartial_avail;
   END IF;
   deb(DEB_EXIT, 'with rc:'||to_char(rc));
   RETURN rc;
END computeAvailableMask;

------------------------------ validateBackupSet0 -----------------------------

FUNCTION validateBackupSet0(
   tag                    IN varchar2 DEFAULT NULL
  ,tagMatchRequired       IN boolean  DEFAULT TRUE
  ,checkDeviceIsAllocated IN boolean  DEFAULT TRUE
  ,validRec               OUT NOCOPY validBackupSetRec_t)
RETURN binary_integer IS
   local     validBackupSetRec_t;
   rc        binary_integer;
   gotRecord number;
BEGIN
   deb(DEB_ENTER, 'validateBackupSet0');
   <<validationLoop>>
   LOOP
      <<nextRow>>
      gotRecord := getValidBackupSet(validBackupSetRec => local,
                                     checkDeviceIsAllocated => FALSE#);
      EXIT WHEN gotRecord = FALSE#;   -- cursor is closed already
      IF (checkDeviceIsAllocated) THEN
         IF (anyDevice = FALSE# AND
             isDeviceTypeAllocated(local.deviceType) = FALSE#) THEN
            deb(DEB_IN, 'device is not allocated');
            -- The required deviceType is not allocated. Remember that
            -- we found an available backup set though.
            IF (rc IS NULL OR rc <> SUCCESS) THEN
               deb(DEB_IN, 'set rc to available');
               rc := dbms_rcvman.AVAILABLE;
            END IF;
            GOTO nextRow;
         END IF;
      END IF;
      validRec := local;   -- set OUT mode arg
      IF (tag IS NOT NULL AND NOT tagMatchRequired) THEN
         -- We are looking for backup sets with a particular tag, but
         -- we want to know about a backup set even if its tag does not match.
         -- In this case, the findValidBackupSet_c or loadBsRecCache cursor
         -- did not do the tag checking for us, so we need to do it here.
         IF (tag = local.tag) THEN
            -- We've found a match for the tag, so we are done. This
            -- backupset meets all of the validation criteria. There may be
            -- another set of pieces with a different copy# or a different
            -- deviceType that also meet the criteria. If the caller wants to
            -- find them, he will have to use the find/getValidBackupSet
            -- procedures to get the entire list, or call this routine
            -- again.
            deb(DEB_IN, 'tag matches');
            rc := SUCCESS;
            deb(DEB_IN, 'exiting loop with rc: SUCCESS');
            EXIT validationLoop;
         ELSE
            -- Even though the tag does not match, we want to remember
            -- about this set of pieces. Keep looking for a match for our
            -- tag though.
            deb(DEB_IN, 'tag does not match, continuing search');
            rc := SUCCESS;
         END IF;
      ELSE
         -- Here, we know that the backupset is valid. There may be multiple
         -- valid copies available, but we don't care. The fact that one copy
         -- is available is good enough for us.
         rc := SUCCESS;
         deb(DEB_IN, 'exiting loop with rc: SUCCESS');
         EXIT validationLoop;
      END IF;
   END LOOP;
   IF (rc IS NULL) THEN
      deb(DEB_IN, 'rc is null, setting to unavailable');
      rc := dbms_rcvman.UNAVAILABLE;
   END IF;
   deb(DEB_EXIT, 'with rc:'||to_char(rc));
   RETURN rc;
END validateBackupSet0;

------------------------------------------
-- Compute Recovery Actions Subroutines --
------------------------------------------

------------------------------ getRecStackCount ------------------------------

FUNCTION getRecStackCount RETURN binary_integer IS
BEGIN
   RETURN rcvRecStack.count;
END getRecStackCount;

------------------------------ getRecFullCount -------------------------------

FUNCTION getRecFullCount RETURN binary_integer IS
BEGIN
   RETURN rcvRecStackState.fullBackups;
END getRecFullCount;

--------------------------------- rcvRecPush ----------------------------------

PROCEDURE rcvRecPush(
   rcvRec IN rcvRec_t) IS
BEGIN
   rcvRecStack.extend;
   deb(DEB_PRINT,'rcvRecPush:from_scn='||rcvRec.fromSCN_act||
       ',to_scn='||rcvRec.toSCN_act||',rcvRecStackCount='||rcvRecStack.count);
   rcvRecStack(rcvRecStack.last) := rcvRec;
END rcvRecPush;

---------------------------------- rcvRecGet ----------------------------------

PROCEDURE rcvRecGet(
   indx   IN number
  ,rcvRec OUT NOCOPY rcvRec_t) IS
BEGIN
   rcvRec := rcvRecStack(indx);
END rcvRecGet;

---------------------------------- rcvRecTop ----------------------------------

PROCEDURE rcvRecTop(
   rcvRec OUT NOCOPY rcvRec_t) IS
BEGIN
   IF (rcvRecStack.count = 0) THEN
      rcvRec := NULL;
   ELSE
      rcvRecGet(rcvRecStack.count, rcvRec);
      deb(DEB_PRINT,'rcvRecTop:from_scn='||rcvRec.fromSCN_act||
          ',to_scn='||rcvRec.toSCN_act||
          ',rcvRecStackCount='||rcvRecStack.count);
   END IF;
END rcvRecTop;

---------------------------------- rcvRecPop ----------------------------------

PROCEDURE rcvRecPop(
   rcvRec OUT NOCOPY rcvRec_t) IS
BEGIN
   rcvRecTop(rcvRec);
   rcvRecStack.trim;
END rcvRecPop;

-------------------------------- rcvRecConvert --------------------------------

PROCEDURE rcvRecConvert(
   rcvRec IN OUT NOCOPY rcvRec_t) IS
BEGIN
   -- Get rid of any nulls that would be unacceptable to the pre-8.1.6
   -- RMAN. The pre-8.1.6 RMAN did not use null-indicators consistently.
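   -- The substitutions below use fixed sentinels: numeric fields become 0
   -- and character fields become the literal 'NULL'.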
   rcvRec.recid_con      := nvl(rcvRec.recid_con, 0);
   rcvRec.stamp_con      := nvl(rcvRec.stamp_con, 0);
   rcvRec.setStamp_con   := nvl(rcvRec.setStamp_con, 0);
   rcvRec.setCount_con   := nvl(rcvRec.setCount_con, 0);
   rcvRec.fileName_con   := nvl(rcvRec.fileName_con, 'NULL');
   rcvRec.blockSize_con  := nvl(rcvRec.blockSize_con, 0);
   rcvRec.blocks_con     := nvl(rcvRec.blocks_con, 0);
   rcvRec.deviceType_con := nvl(rcvRec.deviceType_con, 'NULL');
END rcvRecConvert;

--------------------------------- printRcvRec ---------------------------------

PROCEDURE printRcvRec(
   action  IN rcvRec_t
  ,summary IN boolean default FALSE) IS
   l              varchar2(600);
   cfcretime      varchar2(100);
   action_deleted boolean;
   procedure prt(str in out varchar2) is
   begin
      if length(str) > 2 then
         deb(DEB_PRINT, str);
         str := ' ';
      end if;
   end;
BEGIN
   ------------------------
   -- Recovery Container --
   ------------------------
   deb(DEB_PRINT, 'DUMPING RECOVERY CONTAINER');
   -- Print the container type
   IF (action.type_con = backupSet_con_t) THEN
      IF (action.type_act = full_act_t) THEN
         deb(DEB_PRINT, ' Full Backup Set');
      ELSE
         deb(DEB_PRINT, ' Incremental Backup Set');
      END IF;
   ELSIF (action.type_con = proxyCopy_con_t) THEN
      deb(DEB_PRINT, ' Proxy Backup');
   ELSIF (action.type_con = imageCopy_con_t) THEN
      deb(DEB_PRINT, ' Datafile Copy');
   ELSIF (action.type_con = offlineRangeRec_con_t) THEN
      IF (action.type_act = offlineRange_act_t) THEN
         deb(DEB_PRINT, ' Offline Range Record');
      ELSIF (action.type_act = cleanRange_act_t) THEN
         deb(DEB_PRINT, ' Clean Range');
      ELSIF (action.type_act = implicitRange_act_t) THEN
         deb(DEB_PRINT, ' Implicit Offline Range');
      ELSIF (action.type_act = spanningRange_act_t) THEN
         deb(DEB_PRINT, ' Spanning Offline Range');
      ELSE
         deb(DEB_PRINT, ' Unknown Offline Range Action Type');
      END IF;
   ELSIF (action.type_con = datafile_con_t) THEN
      deb(DEB_PRINT, ' datafile container type');
   ELSIF (action.type_con = addredo_con_t) THEN
      deb(DEB_PRINT, ' Add Redo');
   ELSE
      deb(DEB_PRINT, ' Unknown recovery container type');
   END IF;
   -- Print the type-specific container data
   IF (action.type_con = backupSet_con_t) THEN
      deb(DEB_PRINT, ' bsKey=' || to_char(action.bsKey_con) ||
          ' bsRecid=' || to_char(action.bsRecid_con) ||
          ' bsStamp=' || to_char(action.bsStamp_con) ||
          ' setStamp=' || to_char(action.setStamp_con) ||
          ' setCount=' || to_char(action.setCount_con) ||
          ' site_key=' || to_char(action.site_key_con));
      deb(DEB_PRINT, ' bsLevel=' || to_char(action.bsLevel_con) ||
          ' bsType=' || action.bsType_con ||
          ' pieceCount=' || to_char(action.pieceCount_con));
      deb(DEB_PRINT, ' multi_section=' ||
          nvl(action.multi_section_con, 'NULL'));
   ELSIF (action.type_con = proxyCopy_con_t OR
          action.type_con = imageCopy_con_t) THEN
      deb(DEB_PRINT, ' fileName=' || action.fileName_con);
      deb(DEB_PRINT, ' media=' || action.media_con);
   END IF;
   IF (summary) THEN
      RETURN;
   END IF;
   -- Print the remaining container data
   l := ' ';
   -- key_con
   IF (action.key_con is not null) THEN
      l := l || ' key=' || to_char(action.key_con);
   END IF;
   -- recid_con, stamp_con
   IF (action.recid_con is not null) THEN
      l := l || ' recid=' || to_char(action.recid_con) ||
           ' stamp=' || to_char(action.stamp_con);
   END IF;
   -- status_con
   IF (action.status_con is not null) THEN
      l := l || ' status=' || action.status_con;
   END IF;
   prt(l);
   -- tag
   IF (action.tag_con is not null) THEN
      l := l || ' tag=' || action.tag_con;
   END IF;
   -- compTime
   IF (action.compTime_con is not null) THEN
      l := l || ' compTime=' || to_char(action.compTime_con);
   END IF;
   prt(l);
   IF (action.deviceType_con is not null) THEN
      l := l || ' deviceType=' || action.deviceType_con;
   END IF;
   IF
   (action.blocks_con is not null) THEN
      l := l || ' blocks=' || to_char(action.blocks_con) ||
           ' blockSize=' || to_char(action.blockSize_con);
   END IF;
   IF (action.cfCreationTime_con is not null) THEN
      l := l || ' cfCreationTime=' || to_char(action.cfCreationTime_con);
   END IF;
   IF (action.pieceNumber_con is not null) THEN
      l := l || ' pieceNumber=' || to_char(action.pieceNumber_con);
   END IF;
   IF (action.bpCompTime_con is not null) THEN
      l := l || ' bpCompTime=' || to_char(action.bpCompTime_con);
   END IF;
   IF (action.bpCompressed_con is not null) THEN
      l := l || ' bpCompressed=' || to_char(action.bpCompressed_con);
   END IF;
   prt(l);
   ---------------------
   -- Recovery Action --
   ---------------------
   -- fromSCN
   IF (action.fromSCN_act is not null) THEN
      l := l || ' fromSCN=' || to_char(action.fromSCN_act);
   END IF;
   -- toSCN toTime
   IF (action.toSCN_act is not null) THEN
      l := l || ' toSCN=' || to_char(action.toSCN_act) ||
           ' toTime=' || to_char(action.toTime_act);
   END IF;
   -- level
   IF (action.level_act is not null) THEN
      l := l || ' level=' || to_char(action.level_act);
   END IF;
   -- section size
   IF (action.section_size_act is not null) THEN
      l := l || ' section_size=' || to_char(action.section_size_act);
   END IF;
   prt(l);
   IF (action.rlgSCN_act is not null) THEN
      l := l || ' rlgSCN=' || to_char(action.rlgSCN_act) ||
           ' rlgTime=' || to_char(action.rlgTime_act) ||
           ' dbincKey=' || to_char(action.dbincKey_act);
   END IF;
   prt(l);
   IF (action.afzSCN_act is not null) THEN
      l := l || ' afzSCN=' || to_char(action.afzSCN_act);
   END IF;
   prt(l);
   IF (action.rfzSCN_act is not null AND action.rfzSCN_act != 0) THEN
      l := l || ' rfzSCN=' || to_char(action.rfzSCN_act) ||
           ' rfzTime=' || nvl(to_char(action.rfzTime_act), 'NULL');
   END IF;
   prt(l);
   ---------------------
   -- Recovery Object --
   ---------------------
   IF (action.dfNumber_obj IS NOT NULL) THEN
      l := l || ' dfNumber=' || to_char(action.dfNumber_obj) ||
           ' creationSCN=' || to_char(action.dfCreationSCN_obj) ||
           ' pluginSCN=' || to_char(action.pluginSCN_obj) ||
           ' foreignDbid=' || to_char(action.foreignDbid_obj) ||
           ' pluggedRonly=' || to_char(action.pluggedRonly_obj);
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' cfType=' || nvl(action.cfType_obj, 'NULL');
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' keep_options=' || nvl(to_char(action.keep_options), 'NULL') ||
           ' keep_until=' || nvl(to_char(action.keep_until), 'NULL');
      deb(DEB_PRINT, l);
      IF (action.cfSequence_obj IS NOT NULL) THEN
         l := ' ';
         l := l || ' cfSequence=' || to_char(action.cfSequence_obj) ||
              ' cfDate=' || nvl(to_char(action.cfDate_obj), 'NULL');
         deb(DEB_PRINT, l);
      END IF;
   ELSIF (action.logSequence_obj IS NOT NULL) THEN
      l := l || ' logSequence=' || to_char(action.logSequence_obj);
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logThread=' || to_char(action.logThread_obj);
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logLowSCN=' || to_char(action.logLowSCN_obj);
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logLowTime=' || to_char(action.logLowTime_obj);
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logNextSCN=' || nvl(to_char(action.logNextSCN_obj), 'NULL');
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logNextTime=' || nvl(to_char(action.logNextTime_obj), 'NULL');
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logTerminalEor=' || action.logTerminal_obj;
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logRlgSCN=' || nvl(to_char(action.logRlgSCN_obj), 'NULL');
      deb(DEB_PRINT, l);
      l := ' ';
      l := l || ' logRlgTime=' || nvl(to_char(action.logRlgTime_obj), 'NULL');
      deb(DEB_PRINT, l);
   ELSIF (action.toTime_act IS NOT NULL) THEN
      deb(DEB_PRINT, ' SPFILE');
      deb(DEB_PRINT, ' modification_time=' ||
to_char(action.toTime_act));
      deb(DEB_PRINT, ' db_unique_name=' || action.sfDbUniqueName_obj);
      l := ' ';
      l := l || ' keep_options=' || nvl(to_char(action.keep_options), 'NULL') ||
                ' keep_until=' || nvl(to_char(action.keep_until), 'NULL');
      deb(DEB_PRINT, l);
   ELSE
      deb(DEB_PRINT, ' Unknown Recovery Object');
   END IF;
EXCEPTION
   WHEN OTHERS THEN
      deb(DEB_PRINT, 'printRcvRec: caught an exception, aborting print');
      RETURN;
END printRcvRec;

-- See if redo is needed to fill a gap between this action and the one
-- that follows it, which is already on the rcvRecStack and is pointed to
-- by rcvRecStackState.lowAction

---------------------------------- redoNeeded ---------------------------------

FUNCTION redoNeeded(
   action IN rcvRec_t)
RETURN boolean IS
BEGIN
   deb(DEB_ENTER, 'redoNeeded');
   IF (rcvRecStackState.lowAction > 0 AND   -- Have a non-full_act_t on stack?
       action.toSCN_act <
          rcvRecStack(rcvRecStackState.lowAction).fromSCN_act) THEN
      deb(DEB_EXIT, 'with: TRUE');
      RETURN TRUE;
   ELSE
      deb(DEB_EXIT, 'with: FALSE');
      RETURN FALSE;
   END IF;
END redoNeeded;

---------------------------------- canAddRedo ---------------------------------

FUNCTION canAddRedo(
   isAncestor    IN boolean
  ,from_scn      IN number
  ,from_rlgscn   IN number
  ,to_action     IN rcvRec_t
  ,partial_rcv   IN boolean
  ,doingRecovery IN boolean)
RETURN number IS
BEGIN
   deb(DEB_ENTER, 'canAddRedo');
   IF (from_rlgscn = this_reset_scn) THEN
      IF (partial_rcv) THEN
         deb(DEB_EXIT, 'with: action_OK');
         RETURN action_OK;
      ELSE
         -- A partial media recovery can only be done if we have a
         -- current controlfile. A partial recovery does not
         -- recover the controlfile. This could be implemented,
         -- but it requires using an enqueue to ensure only
         -- 1 process tries to recover the controlfile.
         -- Since we aren't recovering the controlfile,
         -- the file header won't be handled properly when
         -- we hit controlfile redo. That is why we are
         -- requiring a current controlfile. Since we don't have
         -- one, we will have to do database recovery on this datafile.
         -- It is possible that we have a current controlfile here,
         -- but RMAN currently does not support partial media recovery.
         deb(DEB_EXIT, 'with: action_FAIL');
         RETURN action_FAIL;
      END IF;
   ELSE
      deb(DEB_IN, 'from_rlgscn=' || nvl(to_char(from_rlgscn), 'NULL') ||
                  ' this_reset_scn=' || to_char(this_reset_scn));
      IF (isAncestor) THEN
         deb(DEB_IN, 'isAncestor is TRUE;');
         IF (canApplyAnyRedo = TRUE# AND
             from_scn >= nvl(inc_list(max_inc_idx-1).prior_resetlogs_change#,
                             inc_list(max_inc_idx-1).resetlogs_change#)) THEN
            deb(DEB_PRINT, 'canAddRedo: return action_OLD_INC_REDO');
            return action_OLD_INC_REDO;
         ELSE
            -- Since the allIncarnations variable combined with doingRecovery
            -- is used to infer which command computeRecoveryAction
            -- is called for, given below is the action taken for the
            -- different commands:
            --   For RESTORE/RECOVER - always return action_OLD_REDO.
            --   For REPORT/DELETE OBSOLETE - return okay for parent
            --   incarnation backups, as the user can do manual recovery
            --   thru resetlogs. So old backups are not reported as obsolete.
            --   For LIST RECOVERABLE - pre-10i cannot use old incarnation
            --   backups, while the same is true for 10i if not known to
            --   the incarnation table.
            --
            -- NOTE: REPORT/DELETE OBSOLETE sets allIncarnations to TRUE
            -- while LIST RECOVERABLE, RESTORE, RECOVER does not.
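            --
            -- For illustration, the decision below for an ancestor
            -- incarnation reduces to (a sketch of this code path, not new
            -- behavior):
            --    IF    doingRecovery              THEN action_OLD_REDO
            --    ELSIF allIncarnations = TRUE#    THEN action_OK
            --    ELSE                                  action_OLD_REDO
            -- e.g. REPORT OBSOLETE (allIncarnations=TRUE#, doingRecovery
            -- FALSE) keeps parent-incarnation backups, while RECOVER
            -- (doingRecovery TRUE) always sees action_OLD_REDO.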
            IF (doingRecovery) THEN
               deb(DEB_PRINT, 'with: action_OLD_REDO (doingRecovery)');
               RETURN action_OLD_REDO;
            ELSIF (allIncarnations = TRUE#) THEN
               deb(DEB_PRINT, 'canAddRedo: returning action_OK');
               RETURN action_OK;
            ELSE
               deb(DEB_PRINT, 'canAddRedo: returning action_OLD_REDO');
               RETURN action_OLD_REDO;
            END IF;
         END IF;
      ELSE
         deb(DEB_IN, 'isAncestor is FALSE;');
         -- We should never attempt to apply redo from an incarnation that
         -- we don't know with certainty is one of our ancestors.
         deb(DEB_EXIT, 'with: action_OLD_REDO');
         RETURN action_OLD_REDO;
      END IF;
   END IF;
   deb(DEB_EXIT, 'with undefined status');
END canAddRedo;

----------------------------------- addRedo -----------------------------------

FUNCTION addRedo(
   isAncestor    IN boolean
  ,from_scn      IN number
  ,from_rlgscn   IN number
  ,to_action     IN rcvRec_t
  ,partial_rcv   IN boolean
  ,doingRecovery IN boolean)
RETURN number IS
   canAdd number;
BEGIN
   deb(DEB_ENTER, 'addRedo');
   deb(DEB_IN,'Enter - from_scn=' || from_scn|| ',from_rlgscn=' ||from_rlgscn);
   canAdd := canAddRedo(isAncestor, from_scn, from_rlgscn, to_action,
                        partial_rcv, doingRecovery);
   IF (canAdd = action_FAIL) THEN
      -- Trim to the last save point. If we are doing RESTORE or RECOVER,
      -- then this will discard all actions we have stacked so far.
      -- If we are doing LIST, this will discard all actions up to
      -- the last full/level 0/copy/proxy-copy that we found. We never
      -- need to trim actions that come after the save point since we
      -- know they can be applied.
      -- Since we have a gap in the redo stream here, reset lowAction
      -- to 0. No actions that may be coming after this one will cover
      -- this gap since their to_scn's must be <= this action's.
      -- It is possible that the lowAction is about to be trimmed, but
      -- even if it remains on the stack, we have a broken chain and
      -- need to start a new one.
      rcvRecStackState.lowAction := 0;
      rcvRecStack.trim(rcvRecStack.last -
                       greatest(rcvRecStackState.savePoint,
                                rcvRecStackState.top));
      deb(DEB_IN,'trimming savepoint1, rcvRecStackCount='|| rcvRecStack.count);
      deb(DEB_EXIT, 'with: action_FAIL');
      RETURN action_FAIL;
   ELSIF (canAdd = action_OK) THEN
      redoRec.type_act          := redo_act_t;
      redoRec.fromSCN_act       := from_scn;
      redoRec.toSCN_act         := to_action.fromSCN_act;
      redoRec.toTime_act        := to_date(null);
      redoRec.rlgSCN_act        := from_rlgscn;
      redoRec.dfNumber_obj      := to_action.dfNumber_obj;
      redoRec.dfCreationSCN_obj := to_action.dfCreationSCN_obj;
      redoRec.pluginSCN_obj     := 0;
      redoRec.pluggedRonly_obj  := 0;
      rcvRecPush(redoRec);
      deb(DEB_EXIT, 'with: action_OK');
      RETURN action_OK;
   ELSIF (canAdd = action_OLD_INC_REDO) THEN
      redoRec.type_con          := addredo_con_t;
      redoRec.type_act          := redo_act_t;
      redoRec.fromSCN_act       := from_scn;
      redoRec.toSCN_act         := to_action.fromSCN_act;
      redoRec.toTime_act        := to_date(null);
      redoRec.rlgSCN_act        := from_rlgscn;
      redoRec.dfNumber_obj      := to_action.dfNumber_obj;
      redoRec.dfCreationSCN_obj := to_action.dfCreationSCN_obj;
      redoRec.pluginSCN_obj     := 0;
      redoRec.pluggedRonly_obj  := 0;
      deb(DEB_EXIT, 'with: action_OLD_INC_REDO');
      RETURN action_OLD_INC_REDO;
   ELSE   -- ancestral incarnation
      -- Application of redo from an ancestral incarnation is not supported.
      -- However, we cannot trim these actions as we did in the case above
      -- because they are required to reach the offline-range that spans
      -- the resetlogs. So we simply return an error.
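      --
      -- To summarize the outcomes above with hypothetical SCNs: a gap from
      -- from_scn=1000 up to to_action.fromSCN_act=1500 either becomes a
      -- redoRec pushed onto the stack (action_OK), an old-incarnation
      -- redoRec that is built but not pushed (action_OLD_INC_REDO), or an
      -- error with no record at all (action_FAIL, or action_OLD_REDO in
      -- this branch).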
      deb(DEB_EXIT, 'with: action_OLD_REDO');
      RETURN action_OLD_REDO;
   END IF;
   deb(DEB_EXIT, 'with undefined status');
END addRedo;

-- Following are the return codes for CheckRecAction:
--   SUCCESS     means more validation, if required, will be done in the
--               caller function.
--   action_SKIP means the action is an orphan.

FUNCTION CheckRecAction(
   action IN rcvRec_t)
RETURN number IS
   rlgSCN  number;
   rlgTime date;
   toSCN   number;
   fromSCN number;
BEGIN
   IF (canApplyAnyRedo = FALSE#) THEN
      return SUCCESS;
   END IF;

   IF (action.pluggedRonly_obj != 0) THEN
      deb(DEB_PRINT, 'CheckRecAction called for plugged readonly action');
      rlgSCN  := action.pluginRlgSCN_obj;
      rlgTime := action.pluginRlgTime_obj;
      toSCN   := action.pluginSCN_obj;
      fromSCN := action.pluginSCN_obj;
   ELSE
      rlgSCN  := action.rlgSCN_act;
      rlgTime := action.rlgTime_act;
      toSCN   := action.toSCN_act;
      fromSCN := action.fromSCN_act;
   END IF;

   deb(DEB_PRINT, ' CheckRecAction called '||
       to_char(rlgTime,'MM/DD/RR HH24:MI:SS')|| '; rlgscn='||rlgSCN);

   IF (action.type_con = backupSet_con_t OR
       action.type_con = imageCopy_con_t OR
       action.type_con = proxyCopy_con_t ) THEN
      FOR inc_idx in 0..max_inc_idx-1 LOOP
         IF (rlgSCN  = inc_list(inc_idx).resetlogs_change# AND
             rlgTime = inc_list(inc_idx).resetlogs_time ) THEN
            IF (inc_idx = 0 OR
                toSCN <= inc_list(inc_idx-1).resetlogs_change#) THEN
               deb(DEB_PRINT, 'CheckRecAction:matches inc='||inc_idx||
                   ',fromscn='||fromSCN || ',toscn='||toSCN);
               return SUCCESS;
            ELSE
               deb(DEB_PRINT, 'CheckRecAction: inc='||inc_idx||
                   ',toscn='||toSCN||
                   ' exceeds '||inc_list(inc_idx-1).resetlogs_change#);
               deb(DEB_PRINT,
           'CheckRecAction:belongs to orphan branch of this incarnation:');
               return action_SKIP;
            END IF;
         END IF;
      END LOOP;
      deb(DEB_PRINT, 'CheckRecAction:not known to incarnation table');
   ELSE
      return SUCCESS;
   END IF;

   -- The action could belong to a parent incarnation that is not known to
   -- the incarnation table. For now, these are handled using a recursive
   -- call to computeRecoveryAction, as was done in 9i and prior releases.
   return action_SKIP;
END CheckRecAction;

----------------------------- isValidAction -----------------------------------

FUNCTION isValidAction(action IN rcvRec_t) RETURN boolean IS
   valid boolean := TRUE;
BEGIN
   IF (bitand(action.type_con, getRA_containerMask) = 0) THEN
      deb(DEB_PRINT, 'isValidAction: skipping non-selected container type');
      deb(DEB_PRINT, 'isValidAction: Container type : '|| action.type_con);
      deb(DEB_PRINT, 'isValidAction: Container Mask : '|| getRA_containerMask);
      valid := FALSE;   -- then skip this action
   ELSIF (bitand(action.type_act, getRA_actionMask) = 0) THEN
      deb(DEB_PRINT, 'isValidAction: skipping non-selected action type');
      deb(DEB_PRINT, 'isValidAction: Action type : '|| action.type_act);
      deb(DEB_PRINT, 'isValidAction: Action Mask : '|| getRA_actionMask);
      valid := FALSE;   -- then skip this action
   ELSIF (bitand(action.type_con, deleted_con_t) > 0) THEN
      deb(DEB_PRINT, 'isValidAction: deleted action skipped:');
      valid := FALSE;   -- then skip this action
   -- If we were doing a LIST, compare the TAG if specified
   ELSIF (computeRA_allRecords = TRUE# AND restoreTag is not null AND
          bitand(action.type_con, tagMask_con_t) > 0 AND
          (action.tag_con <> restoreTag OR action.tag_con is null)) THEN
      deb(DEB_PRINT, 'isValidAction: tag mismatch - skipped:');
      valid := FALSE;   -- then skip this action
   -- Compare the COMPLETED AFTER time if specified. Note that
   -- action.compTime may be null.
   ELSIF (getRA_completedAfter IS NOT NULL AND
          action.compTime_con < getRA_completedAfter) THEN
      deb(DEB_PRINT, 'isValidAction: compTime < completedAfter - skipped:');
      valid := FALSE;   -- then skip this action
   -- Compare the COMPLETED BEFORE if specified
   ELSIF (getRA_completedBefore IS NOT NULL AND
          action.compTime_con > getRA_completedBefore) THEN
      deb(DEB_PRINT, 'isValidAction: compTime > completedBefore - skipped:');
      valid := FALSE;
   -- Compare the LIKE pattern if specified
   ELSIF (getRA_likePattern IS NOT NULL AND
          action.fileName_con NOT LIKE getRA_likePattern) THEN
      deb(DEB_PRINT, 'isValidAction: LikePattern not matched - skipped:');
      valid := FALSE;
   END IF;
   RETURN valid;
END isValidAction;

----------------------------- resetrcvRecStack --------------------------------

PROCEDURE resetrcvRecStack IS
BEGIN
   IF (rcvRecStack.count > 0) THEN
      rcvRecStack.trim(rcvRecStack.count);
   END IF;
   rcvRecStackState.lowAction   := 0;
   rcvRecStackState.savePoint   := 0;
   rcvRecStackState.fullBackups := 0;
   rcvRecStackState.top         := 0;
END resetrcvRecStack;

------------------------- fetchCursor1RecoveryAction -------------------------

-- At the end of this call rcvRecCursor.currc1 will contain a valid cursor1
-- record that is filtered by the rcvRecCursor1Filter_c cursor.
-- The client must use the rcvRecCursor.currc1 record only if
--   o type_con is not null
--   o fno and crescn match
-- Pass opcode=1 to get the next valid record and opcode=2 to start from
-- the current record.

PROCEDURE fetchCursor1RecoveryAction(
   dbincKey      IN number
  ,fno           IN number
  ,creSCN        IN number
  ,dfCkpSCN      IN number
  ,dbincRlgSCN   IN number
  ,dbincRlgTime  IN date
  ,offlSCN       IN number
  ,onlSCN        IN number
  ,onlTime       IN date
  ,cleanSCN      IN number
  ,clean2SCN     IN number
  ,clean2Time    IN date
  ,targetSCN     IN number
  ,opcode        IN binary_integer   -- 1 => seeknext, 2 => seekcurrent
  ,foreignDbid   IN number
  ,pluggedRonly  IN binary_integer   -- 1 => readonly, 0 => readwrite
  ,pluginSCN     IN number
  ,pluginRlgSCN  IN number
  ,pluginRlgTime IN date
  ,rmanCmd       IN binary_integer) IS
   action    rcvRec_t;
   actCreSCN number;
   inpCreSCN number;
BEGIN
   deb(DEB_ENTER, 'fetchCursor1RecoveryAction');
   deb(DEB_IN, 'opcode=' || to_char(opcode));

   -- Return if the cursor has no more data.
   IF (rcvRecCursor1_c%NOTFOUND) THEN
      rcvRecCursor.currc1.type_con := to_number(null);
      deb(DEB_EXIT, 'no more records');
      RETURN;
   END IF;

   IF (pluginSCN != 0) THEN
      inpCreSCN := pluginSCN;
   ELSE
      inpCreSCN := creSCN;
   END IF;

   -- Determine where to transfer control
   IF (opcode = 1) THEN
      goto seekNext;
   ELSIF (opcode = 2) THEN
      goto seekCurrent;
   ELSE
      raise_application_error(-20999, 'fetchCursor1RecoveryAction - 1');
   END IF;

   -- seek to the next record given by rcvRecCursor1_c that matches
   -- (fno, crescn)
   <<seekNext>>
   deb(DEB_IN, 'seekNext');
   LOOP
      rcvRecCursor.currc1.type_con := to_number(null);
      FETCH rcvRecCursor1_c INTO rcvRecCursor.currc1;
      IF (rcvRecCursor1_c%NOTFOUND) THEN
         rcvRecCursor.currc1.type_con := to_number(null);
         deb(DEB_IN, 'no more records');
         EXIT;
      END IF;
      IF (rcvRecCursor.currc1.pluginSCN_obj != 0) THEN
         actCreSCN := rcvRecCursor.currc1.pluginSCN_obj;
      ELSE
         actCreSCN := rcvRecCursor.currc1.dfCreationSCN_obj;
      END IF;
      deb(DEB_IN, 'rcvRecCursor1_c record');
      printRcvRec(rcvRecCursor.currc1);
      IF (rcvRecCursor.currc1.dfNumber_obj > fno OR
          (rcvRecCursor.currc1.dfNumber_obj = fno AND
           actCreSCN > inpCreSCN) OR
          (rcvRecCursor.currc1.dfNumber_obj = fno AND
           actCreSCN = inpCreSCN)) THEN
         -- Do not trash the rcvRecCursor.currc1 record because it is
         -- the starting point of the next file.
         EXIT;
      END IF;
      IF (debug) THEN
         deb(DEB_IN, 'skipped following record: summary');
         printRcvRec(rcvRecCursor.currc1, TRUE);
      END IF;
   END LOOP;

   <<seekCurrent>>
   IF (rcvRecCursor.currc1.pluginSCN_obj != 0) THEN
      actCreSCN := rcvRecCursor.currc1.pluginSCN_obj;
   ELSE
      actCreSCN := rcvRecCursor.currc1.dfCreationSCN_obj;
   END IF;
   IF (rcvRecCursor.currc1.type_con is null OR
       rcvRecCursor.currc1.dfNumber_obj > fno OR
       (rcvRecCursor.currc1.dfNumber_obj = fno AND
        actCreSCN > inpCreSCN)) THEN
      deb(DEB_EXIT, 'seekCurrent - beyond current fno, creSCN');
      RETURN;
   END IF;

   IF (rcvRecCursor.currc1.dfNumber_obj != fno OR
       actCreSCN != inpCreSCN) THEN
      raise_application_error(-20999, 'fetchCursor1RecoveryAction ' ||
         'dfNumber_obj=' || to_char(rcvRecCursor.currc1.dfNumber_obj) ||
         ' dfCreationSCN_obj=' || to_char(actCreSCN));
   END IF;

   -- A simple check before trying the real filter
   -- Bug-4558970: Optimize performance by skipping the open cursor
   IF (rmanCmd = blkRestoreCmd_t) THEN
      IF (rcvRecCursor.currc1.toSCN_act > targetSCN) THEN
         deb(DEB_IN, 'a. simple filter rejected - trying next');
         goto seekNext;
      END IF;
   ELSE
      IF (rcvRecCursor.currc1.toSCN_act < dfCkpSCN AND
          rcvRecCursor.currc1.fromSCN_act < dfCkpSCN) THEN
         deb(DEB_IN, 'b. simple filter rejected - trying next');
         goto seekNext;
      END IF;
   END IF;

   -- What does our filter say about this record?
   OPEN rcvRecCursor1Filter_c(
      dbincKey      => dbincKey
     ,fno           => fno
     ,creSCN        => creSCN
     ,dfCkpSCN      => dfCkpSCN
     ,dbincRlgSCN   => dbincRlgSCN
     ,dbincRlgTime  => dbincRlgTime
     ,offlSCN       => offlSCN
     ,onlSCN        => onlSCN
     ,onlTime       => onlTime
     ,cleanSCN      => cleanSCN
     ,clean2SCN     => clean2SCN
     ,clean2Time    => clean2Time
     ,targetSCN     => targetSCN
     ,c1rec         => rcvRecCursor.currc1
     ,foreignDbid   => foreignDbid
     ,pluggedRonly  => pluggedRonly
     ,pluginSCN     => pluginSCN
     ,pluginRlgSCN  => pluginRlgSCN
     ,pluginRlgTime => pluginRlgTime
     ,rmanCmd       => rmanCmd);
   FETCH rcvRecCursor1Filter_c INTO action;
   IF (rcvRecCursor1Filter_c%NOTFOUND) THEN
      -- This record is rejected by the filter
      CLOSE rcvRecCursor1Filter_c;
      deb(DEB_IN, 'real filter rejected - trying next');
      goto seekNext;
   END IF;
   CLOSE rcvRecCursor1Filter_c;
   deb(DEB_EXIT, 'filter accepted');
END fetchCursor1RecoveryAction;

----------------------------- fetchRecoveryAction -----------------------------

FUNCTION fetchRecoveryAction(
   dbincKey      IN number
  ,fno           IN number
  ,creSCN        IN number
  ,dfCkpSCN      IN number
  ,dbincRlgSCN   IN number
  ,dbincRlgTime  IN date
  ,offlSCN       IN number
  ,onlSCN        IN number
  ,onlTime       IN date
  ,cleanSCN      IN number
  ,clean2SCN     IN number
  ,clean2Time    IN date
  ,targetSCN     IN number
  ,action        IN OUT NOCOPY rcvRec_t
  ,rmanCmd       IN binary_integer
  ,foreignDbid   IN number
  ,pluggedRonly  IN binary_integer   -- 1 => readonly, 0 => readwrite
  ,pluginSCN     IN number
  ,pluginRlgSCN  IN number
  ,pluginRlgTime IN date)
RETURN boolean IS
   top       rcvRec_t;
   c1frec    rcvRec_t;   -- filtered cursor1 record
   actCreSCN number;
   inpCreSCN number;
   actRlgSCN number;
   topRlgSCN number;
BEGIN
   deb(DEB_ENTER, 'fetchRecoveryAction');
   IF (pluginSCN != 0) THEN
      inpCreSCN := pluginSCN;
   ELSE
      inpCreSCN := creSCN;
   END IF;

   <<retry>>
   IF (rcvRecCursor.currc1.pluginSCN_obj != 0) THEN
      actCreSCN := rcvRecCursor.currc1.pluginSCN_obj;
   ELSE
      actCreSCN := rcvRecCursor.currc1.dfCreationSCN_obj;
   END IF;
   IF (rcvRecCursor.currc1.type_con is null OR
       rcvRecCursor.currc1.dfNumber_obj != fno OR
       actCreSCN != inpCreSCN) THEN
      c1frec.type_con := to_number(null);
   ELSE
      c1frec := rcvRecCursor.currc1;
   END IF;

   OPEN rcvRecCursor2_c(
      dbincKey      => dbincKey
     ,fno           => fno
     ,creSCN        => creSCN
     ,dfCkpSCN      => dfCkpSCN
     ,dbincRlgSCN   => dbincRlgSCN
     ,dbincRlgTime  => dbincRlgTime
     ,offlSCN       => offlSCN
     ,onlSCN        => onlSCN
     ,onlTime       =>
onlTime
     ,cleanSCN      => cleanSCN
     ,clean2SCN     => clean2SCN
     ,clean2Time    => clean2Time
     ,targetSCN     => targetSCN
     ,c1frec        => c1frec
     ,excludeAction => rcvRecCursor.excludeAction
     ,foreignDbid   => foreignDbid
     ,pluggedRonly  => pluggedRonly
     ,pluginSCN     => pluginSCN
     ,pluginRlgSCN  => pluginRlgSCN
     ,pluginRlgTime => pluginRlgTime);
   FETCH rcvRecCursor2_c INTO action;
   IF (rcvRecCursor2_c%NOTFOUND) THEN
      action.type_con := NULL;
      action.type_act := NULL;
      CLOSE rcvRecCursor2_c;
      deb(DEB_EXIT, 'no_data_found with: FALSE');
      RETURN FALSE;
   END IF;

   IF (action.type_act = spanningRange_act_t OR
       action.type_act = cleanRange_act_t OR
       action.type_act = implicitRange_act_t) THEN
      -- Exclude this action when called the next time.
      rcvRecCursor.excludeAction :=
         rcvRecCursor.excludeAction + action.type_act;
   ELSE
      -- seek cursor1 to the next record as we are returning the
      -- rcvRecCursor1_c record
      fetchCursor1RecoveryAction(
         dbincKey      => dbincKey
        ,fno           => fno
        ,creSCN        => creSCN
        ,dfCkpSCN      => dfCkpSCN
        ,dbincRlgSCN   => dbincRlgSCN
        ,dbincRlgTime  => dbincRlgTime
        ,offlSCN       => offlSCN
        ,onlSCN        => onlSCN
        ,onlTime       => onlTime
        ,cleanSCN      => cleanSCN
        ,clean2SCN     => clean2SCN
        ,clean2Time    => clean2Time
        ,targetSCN     => targetSCN
        ,opcode        => 1
        ,foreignDbid   => foreignDbid
        ,pluggedRonly  => pluggedRonly
        ,pluginSCN     => pluginSCN
        ,pluginRlgSCN  => pluginRlgSCN
        ,pluginRlgTime => pluginRlgTime
        ,rmanCmd       => rmanCmd);
   END IF;

   IF (action.compTime_con IS NULL AND   -- was null in 8.0.2
       action.type_con = backupSet_con_t) THEN
      action.compTime_con := stamp2date(action.bsStamp_con);
   END IF;

   IF (rmanCmd = obsoleteCmd_t AND action.type_act = incremental_act_t) THEN
      deb(DEB_PRINT, 'fetchRecoveryAction: incr backup set for obsolete cmd');
   ELSE
      IF (computeRA_allRecords = TRUE#) THEN
         CLOSE rcvRecCursor2_c;
         deb(DEB_EXIT, 'with TRUE');
         RETURN TRUE;
      END IF;
      IF (computeRA_fullBackups > 1) THEN
         CLOSE rcvRecCursor2_c;
         deb(DEB_EXIT, 'with TRUE');
         RETURN TRUE;
      END IF;
   END IF;

   -- Discard this action if its fromSCN is not less than the most recently
   -- stacked action's fromSCN. Also check that the resetlogs SCNs are the
   -- same. If they differ, then keep this action. This is necessary to
   -- avoid discarding a spanning offline range here. The real offline range
   -- and the spanning range(s) have the same fromSCN.
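   -- For illustration with hypothetical SCNs: if the top of the stack is a
   -- real offline range with fromSCN_act=2000 and this action is a spanning
   -- range that also starts at fromSCN_act=2000 but has a different rlgSCN,
   -- it is kept; an ordinary action with fromSCN_act >= 2000 and the same
   -- rlgSCN is discarded and the fetch is retried.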
   IF (rcvRecStack.count > 0) THEN
      rcvRecTop(top);
      IF (action.pluginSCN_obj != 0) THEN
         actRlgSCN := action.pluginRlgSCN_obj;
      ELSE
         actRlgSCN := action.rlgSCN_act;
      END IF;
      IF (top.pluginSCN_obj != 0) THEN
         topRlgSCN := top.pluginRlgSCN_obj;
      ELSE
         topRlgSCN := top.rlgSCN_act;
      END IF;
      IF (not (action.fromSCN_act < top.fromSCN_act) AND
          actRlgSCN = topRlgSCN) THEN
         IF (debug) THEN
            deb(DEB_IN, 'discarding this action:');
            printRcvRec(action);
         END IF;
         CLOSE rcvRecCursor2_c;
         GOTO retry;
      END IF;
   END IF;

   CLOSE rcvRecCursor2_c;
   deb(DEB_EXIT, 'with TRUE');
   RETURN TRUE;
END fetchRecoveryAction;

-------------------------- openRecoveryActionCursor --------------------------

PROCEDURE openRecoveryActionCursor(
   dbincKey      IN number
  ,fno           IN number
  ,creSCN        IN number
  ,dfCkpSCN      IN number
  ,dbincRlgSCN   IN number
  ,dbincRlgTime  IN date
  ,offlSCN       IN number
  ,onlSCN        IN number
  ,onlTime       IN date
  ,cleanSCN      IN number
  ,clean2SCN     IN number
  ,clean2Time    IN date
  ,targetSCN     IN number
  ,rmanCmd       IN binary_integer
  ,foreignDbid   IN number
  ,pluggedRonly  IN binary_integer
  ,pluginSCN     IN number
  ,pluginRlgSCN  IN number
  ,pluginRlgTime IN date) IS
   openCursor1 boolean := FALSE;       -- TRUE if cursor1 is to be opened
   opcode      binary_integer := 0;    -- seekNext or seekCurrent
   reqCreSCN   number;
   inpCreSCN   number;
   actCreSCN   number;
BEGIN
   deb(DEB_ENTER, 'openRecoveryActionCursor');
   IF (pluginSCN != 0) THEN
      inpCreSCN := pluginSCN;
   ELSE
      inpCreSCN := creSCN;
   END IF;
   deb(DEB_IN,'target scn is ' || nvl(to_char(targetSCN), 'NULL') ||
       ',creSCN=' || creSCN ||
       ',dfCkpSCN=' || dfCkpSCN ||
       ',dbincRlgSCN=' || dbincRlgSCN ||
       ',offlSCN=' || offlSCN ||
       ',onlSCN=' || onlSCN ||
       ',cleanSCN=' || cleanSCN ||
       ',clean2SCN=' || clean2SCN ||
       ',fno=' || fno ||
       ',pluginSCN=' || pluginSCN ||
       ',rmanCmd=' || rmanCmd);
   deb(DEB_IN, 'currc1.type_con=' ||
       nvl(to_char(rcvRecCursor.currc1.type_con),'NULL') ||
       ' currc1.fno=' ||
       nvl(to_char(rcvRecCursor.currc1.dfNumber_obj), 'NULL') ||
       ' currc1.crescn=' ||
       nvl(to_char(rcvRecCursor.currc1.dfCreationSCN_obj), 'NULL'));

   IF (rcvRecCursor1_c%ISOPEN) THEN
      deb(DEB_IN, 'cursor1 already open');
      IF (tc_database = TRUE# OR isTranslatedFno(fno) = TRUE#) THEN
         deb(DEB_IN,'cursor1 translated');
         IF (rcvRecCursor.reqpluginSCN != 0) THEN
            reqCreSCN := rcvRecCursor.reqpluginSCN;
         ELSE
            reqCreSCN := rcvRecCursor.reqcrescn;
         END IF;
         IF (rcvRecCursor.currc1.pluginSCN_obj != 0) THEN
            actCreSCN := rcvRecCursor.currc1.pluginSCN_obj;
         ELSE
            actCreSCN := rcvRecCursor.currc1.dfCreationSCN_obj;
         END IF;
         IF ((rcvRecCursor.reqfno = fno AND reqCreSCN >= inpCreSCN) OR
             (rcvRecCursor.reqfno > fno)) THEN
            -- The user has requested files out of file# order, e.g.
            -- 'restore datafile 10, 6'. So, we have to reopen the cursor.
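            -- (Conversely, an in-order request such as 'restore datafile
            -- 6, 10' falls into the branches below and reuses the open
            -- cursor: opcode 1 seeks forward to the next file, opcode 2
            -- stays on the current record.)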
            deb(DEB_IN, 'cursor1 unusable');
            openCursor1 := TRUE;
         ELSIF (rcvRecCursor.currc1.type_con is null OR
                rcvRecCursor.currc1.dfNumber_obj < fno OR
                (rcvRecCursor.currc1.dfNumber_obj = fno AND
                 actCreSCN < inpCreSCN)) THEN
            deb(DEB_IN,'reusing cursor1 after seek');
            opcode := 1;   -- seekNext
         ELSIF (rcvRecCursor.currc1.dfNumber_obj = fno AND
                actCreSCN = inpCreSCN) THEN
            deb(DEB_IN,'reusing cursor1 with no seek');
            opcode := 2;   -- seekCurrent
         ELSE
            deb(DEB_IN,'do nothing to cursor1');
         END IF;
      ELSE
         deb(DEB_IN,'cursor1 did not translate');
         openCursor1 := TRUE;
      END IF;
   ELSE
      deb(DEB_IN,'cursor1 not open yet');
      openCursor1 := TRUE;
   END IF;

   IF (openCursor1) THEN
      IF (rcvRecCursor1_c%ISOPEN) THEN
         CLOSE rcvRecCursor1_c;
      END IF;
      setDfTransClause(fno => fno);
      rcvRecCursor.currc1.type_con := to_number(null);
      deb(DEB_OPEN, 'rcvRecCursor1_c');
      OPEN rcvRecCursor1_c(rmanCmd => rmanCmd);
      opcode := 1;   -- seekNext
   END IF;

   -- seek cursor1 to the required position
   IF (opcode != 0) THEN
      fetchCursor1RecoveryAction(
         dbincKey      => dbincKey
        ,fno           => fno
        ,creSCN        => creSCN
        ,dfCkpSCN      => dfCkpSCN
        ,dbincRlgSCN   => dbincRlgSCN
        ,dbincRlgTime  => dbincRlgTime
        ,offlSCN       => offlSCN
        ,onlSCN        => onlSCN
        ,onlTime       => onlTime
        ,cleanSCN      => cleanSCN
        ,clean2SCN     => clean2SCN
        ,clean2Time    => clean2Time
        ,targetSCN     => targetSCN
        ,opcode        => opcode
        ,foreignDbid   => foreignDbid
        ,pluggedRonly  => pluggedRonly
        ,pluginSCN     => pluginSCN
        ,pluginRlgSCN  => pluginRlgSCN
        ,pluginRlgTime => pluginRlgTime
        ,rmanCmd       => rmanCmd);
   END IF;

   -- don't exclude any action
   rcvRecCursor.excludeAction := 0;

   -- remember the current request
   rcvRecCursor.reqfno       := fno;
   rcvRecCursor.reqcrescn    := creSCN;
   rcvRecCursor.reqpluginSCN := pluginSCN;

   -- close other recovery record cursors if open
   IF (rcvRecCursor1Filter_c%ISOPEN) THEN
      CLOSE rcvRecCursor1Filter_c;
   END IF;
   IF (rcvRecCursor2_c%ISOPEN) THEN
      CLOSE rcvRecCursor2_c;
   END IF;
   deb(DEB_EXIT);
END openRecoveryActionCursor;

----------------------------- trimRecoveryActions -----------------------------

FUNCTION trimRecoveryActions(
   maxActions    IN number
  ,containerMask IN number
  ,actionMask    IN number)
RETURN NUMBER IS
   dummy     rcvRec_t;
   remaining number;
BEGIN
   deb(DEB_ENTER,'trimRecoveryActions[function](maxactions='||maxActions||')');
   IF (rcvRecStack.count > 0) THEN
      rcvRecPop(dummy);
      remaining := trimRecoveryActions(maxActions, containerMask, actionMask);
      -- If this record is one that would be selected by the masks in effect,
      -- then we can keep it if the remaining records on the
      -- stack after trimming are less than the maxActions limit.
      -- Note: if other kinds of filtering are going to occur in
      -- getRecoveryAction, then that filtering should also be done here
      -- so that we don't count a record that won't be seen by the client.
      IF ((bitand(dummy.type_con, containerMask) = 0) OR
          (bitand(dummy.type_act, actionMask) = 0)) THEN
         -- This record is not selected by the masks in effect,
         -- or it is a "keep" backup, so we keep it on the stack.
         rcvRecPush(dummy);
         deb(DEB_EXIT, 'with: '||to_char(remaining));
         RETURN remaining;
      ELSE
         IF (remaining < maxActions) THEN
            rcvRecPush(dummy);   -- put back on stack
            deb(DEB_EXIT, 'with: '||to_char(remaining+1));
            RETURN remaining + 1;
         ELSE
            -- Cannot keep it.
            IF (debug) THEN
               deb(DEB_IN, 'deleting action:');
               printRcvRec(dummy);
               deb(DEB_EXIT, 'with: '||to_char(remaining));
            END IF;
            RETURN remaining;
         END IF;
      END IF;
   ELSE
      deb(DEB_EXIT, 'with: 0');
      RETURN 0;
   END IF;
END trimRecoveryActions;

--------------------------- setCraGetAllCfBackups ----------------------------

PROCEDURE setCraGetAllCfBackups(
   flag IN boolean) IS
BEGIN
   IF (flag) THEN
      deb(DEB_PRINT, 'craGetAllCfBackups is set to TRUE');
      craGetAllCfBackups := TRUE#;
   ELSE
      deb(DEB_PRINT, 'craGetAllCfBackups is set to FALSE');
      craGetAllCfBackups := FALSE#;
   END IF;
END setCraGetAllCfBackups;

------------------------ isTranslatedArchivedLog -----------------------------

-- Return TRUE if this archived log is included in the translation.

FUNCTION isTranslatedArchivedLog(
   thread#   IN number
  ,sequence# IN number)
RETURN BOOLEAN IS
   thrbck binary_integer;
   seqbck binary_integer;
BEGIN
   -- thread# wrapped around 2G to keep things simple
   IF (thread# >= CONST2GVAL) THEN
      thrbck := CONST2GVAL - thread#;
   ELSE
      thrbck := thread#;
   END IF;

   -- sequence# wrapped around 2G to keep things simple
   IF (sequence# >= CONST2GVAL) THEN
      seqbck := CONST2GVAL - sequence#;
   ELSE
      seqbck := sequence#;
   END IF;

   IF NOT tc_threadSeq.exists(thrbck) THEN
      RETURN FALSE;
   ELSIF NOT tc_threadSeq(thrbck).exists(seqbck) THEN
      RETURN FALSE;
   ELSE
      RETURN TRUE;
   END IF;
END isTranslatedArchivedLog;

-------------------------- getRangeArchivedLogBackup --------------------------

FUNCTION getRangeArchivedLogBackup(
   rcvRec OUT NOCOPY rcvRec_t)
RETURN binary_integer IS
   local    rcvRec_t;
   BSstatus number;
BEGIN
   deb(DEB_ENTER, 'getRangeArchivedLogBackup');

   IF (getRecStackCount = 0) THEN
      -- There are no more backup records available. Report no backup
      -- available for the remaining archivelogs.
      deb(DEB_EXIT, 'with: UNAVAILABLE');
      RETURN UNAVAILABLE;
   END IF;

   rcvRecPop(local);

   -- If the stacked record is of status '*', then it is available
   -- on another device.
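   -- For illustration: a record popped with status_con='*' is rewritten to
   -- 'A' below and reported as AVAILABLE (usable, but on a device other
   -- than the one allocated), whereas a normal record returns SUCCESS.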
IF (local.status_con = '*') THEN local.status_con := 'A'; BSstatus := AVAILABLE; ELSE BSstatus := SUCCESS; END IF; IF (debug) THEN printRcvRec(local); END IF; rcvRec := local; IF (BSstatus = AVAILABLE) THEN deb(DEB_EXIT, 'with: AVAILABLE'); RETURN AVAILABLE; ELSE deb(DEB_EXIT, 'with: SUCCESS'); RETURN SUCCESS; END IF; END getRangeArchivedLogBackup; ------------------------------ startWithPattern ------------------------------ -- convert a directory pattern to start with pattern FUNCTION startWithPattern( toDest IN varchar2) RETURN VARCHAR2 IS BEGIN IF (toDest IS NULL) THEN RETURN NULL; END IF; RETURN toDest || '%'; END startWithPattern; ------------------------------ extendKeepSCN --------------------------------- -- extending keep scn for the obsolete algorithm PROCEDURE extendKeepSCN(lbDfRec IN OUT NOCOPY lbDfRec_t, toSCN IN number, rlgSCN IN number, extendMask IN binary_integer, force IN boolean, dbgcomment IN varchar2) IS BEGIN IF (bitand(extendMask, extendFullSCN) != 0) THEN IF (force OR toSCN < lbDfRec.fullmin_scn) THEN lbDfRec.fullmin_scn := toSCN; lbDfRec.fullmin_rlgscn := rlgSCN; IF (debug) THEN deb(DEB_PRINT, dbgcomment || ': Extending fullmin_scn to ' || to_char(toSCN)); deb(DEB_PRINT, dbgcomment || ': Extending fullmin_rlgscn to ' || nvl(to_char(rlgSCN), 'null')); END IF; END IF; END IF; IF (bitand(extendMask, extendIncrSCN) != 0) THEN IF (force OR toSCN < lbDfRec.incrmin_scn) THEN lbDfRec.incrmin_scn := toSCN; lbDfRec.incrmin_rlgscn := rlgSCN; IF (debug) THEN deb(DEB_PRINT, dbgcomment || ': Extending incrmin_scn to ' || to_char(toSCN)); deb(DEB_PRINT, dbgcomment || ': Extending incrmin_rlgscn to ' || nvl(to_char(rlgSCN), 'null')); END IF; END IF; END IF; IF (bitand(extendMask, extendLogSCN) != 0) THEN IF (force OR toSCN < lbDfRec.logmin_scn) THEN lbDfRec.logmin_scn := toSCN; lbDfRec.logmin_rlgscn := rlgSCN; IF (debug) THEN deb(DEB_PRINT, dbgcomment || ': Extending logmin_scn to ' || to_char(toSCN)); deb(DEB_PRINT, dbgcomment || ': Extending logmin_rlgscn to ' || nvl(to_char(rlgSCN), 'null')); END IF; END IF; END IF; END extendKeepSCN; -- END_PRVCOMMON_RCVMAN_CODE ------------------------------ resetBsRecCache -------------------------------- PROCEDURE resetBsRecCache( reload IN boolean) IS BEGIN BEGIN deb(DEB_PRINT, '*****BsRecCache Statistics*****'); deb(DEB_PRINT, 'Cache size=' || to_char(cacheBsRecTable.bsRec.count) || ' hit=' || to_char(cacheBsRecTable.chit)); EXCEPTION WHEN no_data_found THEN deb(DEB_PRINT, 'No statistics available'); END; cacheBsRecTable.bsRec.delete; IF (NOT reload) THEN cacheBsRecTable.hitlist.delete; cacheBsRecTable.hitindex := 1; cacheBsRecTable.hint := noHint; END IF; cacheBSRecTable.chit := 0; cacheBsRecTable.mixcopy := FALSE; cacheBsRecTable.minbskey := 0; END resetBsRecCache; ----------------------------- setCachedDeviceType ----------------------------- FUNCTION setCachedDeviceType( type IN varchar2) RETURN binary_integer IS BEGIN FOR i IN 1..cacheBsRecTable.devicecount LOOP IF cacheBsRecTable.devicelist(i) = type THEN RETURN i; END IF; END LOOP; cacheBsRecTable.devicecount := cacheBsRecTable.devicecount + 1; cacheBsRecTable.devicelist(cacheBsRecTable.devicecount) := type; RETURN cacheBsRecTable.devicecount; END setCachedDeviceType; -------------------------------- lkBsRecCache --------------------------------- PROCEDURE lkBsRecCache( bskey IN number ,icopy IN binary_integer ,bsrec OUT NOCOPY cacheBsRecRow_t) IS bucket number; sb4_bucket binary_integer; BEGIN bucket := mod(bskey, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN sb4_bucket := 
CONST2GVAL - bucket; ELSE sb4_bucket := bucket; END IF; BEGIN FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = bskey) THEN bsrec := cacheBsRecTable.bsRec(sb4_bucket).bslist(i).copy(icopy); RETURN; END IF; END LOOP; EXCEPTION WHEN no_data_found THEN NULL; END; RAISE no_data_found; END lkBsRecCache; ----------------------------- addKeyToBsRecCache ------------------------------ FUNCTION addKeyToBsRecCache( bskey IN number) RETURN BOOLEAN IS bsk cacheBsRecBsKey_t; bslist cacheBsRecHash_t; bucket number; sb4_bucket binary_integer; bsindex binary_integer; BEGIN bucket := mod(bskey, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN sb4_bucket := CONST2GVAL - bucket; ELSE sb4_bucket := bucket; END IF; -- Don't add this if this already exists in cache IF (cacheBsRecTable.bsRec.exists(sb4_bucket)) THEN FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = bskey) THEN RETURN FALSE; END IF; END LOOP; bsindex := cacheBsRecTable.bsRec(sb4_bucket).bsindex; ELSE cacheBsRecTable.bsRec(sb4_bucket) := bsk; bsindex := cacheBsRecTable.bsRec(sb4_bucket).bsindex; cacheBsRecTable.bsRec(sb4_bucket).bslist(bsindex) := bslist; END IF; -- add this backupset to this bucket cacheBsRecTable.bsRec(sb4_bucket).bslist(bsindex).bskey := bskey; cacheBsRecTable.bsRec(sb4_bucket).bsindex := bsindex + 1; RETURN TRUE; END addKeyToBsRecCache; -------------------------------- addToBsRecCache ------------------------------ PROCEDURE addToBsRecCache( bskey IN number ,icopy IN binary_integer ,deviceindx IN binary_integer ,tag IN varchar2 ,copyNumber IN binary_integer ,code IN binary_integer) IS bsrec cacheBsRecRow_t; bucket number; sb4_bucket binary_integer; bsindex binary_integer; BEGIN bucket := mod(bskey, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN sb4_bucket := CONST2GVAL - bucket; ELSE sb4_bucket := bucket; END IF; bsrec.deviceindx := deviceindx; bsrec.tag := tag; bsrec.copyNumber := copyNumber; bsrec.code := code; IF (NOT cacheBsRecTable.bsRec.exists(sb4_bucket)) THEN raise_application_error(-20999, 'internal error: addToBsRecCache1'); END IF; FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = bskey) THEN cacheBsRecTable.bsRec(sb4_bucket).bslist(i).copy(icopy) := bsrec; RETURN; END IF; END LOOP; -- no bskey found raise_application_error(-20999, 'internal error: addToBsRecCache2'); END addToBsRecCache; ------------------------------ hitBsRecCache ----------------------------------- FUNCTION hitBsRecCache( bskey IN number ,deviceType IN varchar2 ,tag IN varchar2 ,mask IN binary_integer) RETURN BOOLEAN IS bucket number; sb4_bucket binary_integer; BEGIN -- Does device Type match? IF (deviceType != cacheBsRecTable.deviceType AND (deviceType IS NOT NULL OR cacheBsRecTable.deviceType IS NOT NULL)) THEN RETURN FALSE; END IF; -- Does tag match? IF (nvl(tag, ' ') != nvl(cacheBsRecTable.tag, nvl(tag, ' '))) THEN RETURN FALSE; END IF; -- Does mask match? IF (mask != cacheBsRecTable.mask) THEN RETURN FALSE; END IF; bucket := mod(bskey, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN sb4_bucket := CONST2GVAL - bucket; ELSE sb4_bucket := bucket; END IF; IF (NOT cacheBsRecTable.bsRec.exists(sb4_bucket)) THEN IF (bskey < cacheBsRecTable.minbskey) THEN -- As this bskey < min(bskey) of valid status, this backupset -- is unusable. So, return it as hit and let lkBsRecCache return -- it as no-data-found. 
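         -- For illustration (hypothetical keys): if the smallest usable
         -- backup set key is minbskey=500, a probe for bskey=300 can be
         -- answered from this check alone: report a cache "hit" so that
         -- lkBsRecCache raises no_data_found, without reloading the cache.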
         --
         RETURN TRUE;
      ELSE
         RETURN FALSE;
      END IF;
   END IF;

   FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP
      IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = bskey) THEN
         cacheBsRecTable.chit := cacheBsRecTable.chit + 1;  -- Collect statistics
         IF (cacheBsRecTable.hitindex > bsRecCacheLowLimit * 0.25) THEN
            cacheBsRecTable.hitindex := 1;
         END IF;
         -- Store this bskey in the hit list
         cacheBsRecTable.hitlist(cacheBsRecTable.hitindex) := bskey;
         cacheBsRecTable.hitindex := cacheBsRecTable.hitindex + 1;
         RETURN TRUE;
      END IF;
   END LOOP;
   RETURN FALSE;
END hitBsRecCache;

------------------------------- canMixCopy ------------------------------------

FUNCTION canMixCopy(
   bskey IN number)
RETURN BOOLEAN IS
   bucket     number;
   sb4_bucket binary_integer;
BEGIN
   bucket := mod(bskey, CONST4GVAL);
   IF (bucket >= CONST2GVAL) THEN
      sb4_bucket := CONST2GVAL - bucket;
   ELSE
      sb4_bucket := bucket;
   END IF;
   IF (NOT cacheBsRecTable.bsRec.exists(sb4_bucket)) THEN
      raise_application_error(-20999, 'internal error: canMixCopy1');
   END IF;
   FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP
      IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = bskey) THEN
         RETURN cacheBsRecTable.bsRec(sb4_bucket).bslist(i).mixCopy;
      END IF;
   END LOOP;
   raise_application_error(-20999, 'internal error: canMixCopy2');
END canMixCopy;

------------------------------ loadBsRecCache ---------------------------------

PROCEDURE loadBsRecCache(
   from_bsRec IN rcvRec_t
  ,deviceType IN varchar2 DEFAULT NULL
  ,tag        IN varchar2 DEFAULT NULL
  ,mask       IN binary_integer
  ,mixcopy    IN number) IS
   -- N.B.
   -- partial_avail means the backup set is unusable because it is
   -- INCOMPLETE: some pieces are unavailable or expired, or fewer pieces
   -- exist than the backup set's total piece count.
   -- A partial_avail backupset can be made available by doing a crosscheck,
   -- cataloging backup pieces (TBD), or making the pieces available.
   -- Group by device_type, tag, and copy#. This way, we can see if there is
   -- a set of pieces with the same copy# and tag.
   -- NOTE!! NOTE!! NOTE!!
   -- As isBsRecCacheMatch is called for every row, it also remembers
   -- whether mixcopy will succeed or not. So, make sure you don't
   -- break that logic while adding a new where clause.
   --
   CURSOR loadBsRecCache_c IS
      SELECT bs.bs_key bskey, bp.device_type deviceType, bp.tag tag,
             bp.copy# copyNumber, 1 code, bs.pieces pieces
      FROM bp, bs
      WHERE loadBsRecCache.mixcopy = FALSE#
        AND bs.db_key = this_db_key
        AND bp.db_key = this_db_key
        AND bp.bs_key = bs.bs_key
        AND isBsRecCacheMatch(bs.bs_key, bp.device_type, bp.tag,
                              bp.status) = TRUE#   -- See NOTE
        AND ((user_site_key = bp.site_key) OR
             (user_site_key IS NULL AND
              ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR
               (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
               (this_site_key = nvl(bp.site_key, this_site_key)))))
      GROUP BY bs.bs_key, bs.pieces, bp.device_type, bp.tag, bp.copy#
      HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
               count(DISTINCT piece#) = bs.pieces) OR
              (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
               count(DISTINCT piece#) <= bs.pieces))
      UNION ALL
      -- Allow a mix of copy numbers, but still the same tag. It is possible
      -- that the backup set is available only if pieces with different
      -- copy#'s are combined to form the complete set.
      SELECT bs.bs_key bskey, bp.device_type deviceType, bp.tag tag,
             to_number(null) copyNumber, 2 code, bs.pieces pieces
      FROM bp, bs
      WHERE loadBsRecCache.mixcopy = TRUE#
        AND bs.db_key = this_db_key
        AND bp.db_key = this_db_key
        AND bp.bs_key = bs.bs_key
        AND isBsRecCacheMatch(bs.bs_key, bp.device_type, bp.tag,
                              bp.status) = TRUE#   -- See NOTE
        AND ((user_site_key = bp.site_key) OR
             (user_site_key IS NULL AND
              ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR
               (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
               (this_site_key = nvl(bp.site_key, this_site_key)))))
      GROUP BY bs.bs_key, bs.pieces, bp.device_type, bp.tag
      HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
               count(DISTINCT piece#) = bs.pieces) OR
              (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
               count(DISTINCT piece#) <= bs.pieces))
      UNION ALL
      -- Allow a mix of copy numbers and tags. It is possible that the
      -- backup set is available only if pieces with different copy#'s
      -- and tags are combined to form the complete set.
      SELECT bs.bs_key bskey, bp.device_type deviceType, to_char(null) tag,
             to_number(null) copyNumber, 3 code, bs.pieces pieces
      FROM bp, bs
      WHERE loadBsRecCache.mixcopy = TRUE#
        AND bs.db_key = this_db_key
        AND bp.db_key = this_db_key
        AND bp.bs_key = bs.bs_key
        AND isBsRecCacheMatch(bs.bs_key, bp.device_type, bp.tag,
                              bp.status) = TRUE#   -- See NOTE
        AND ((user_site_key = bp.site_key) OR
             (user_site_key IS NULL AND
              ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR
               (tape_backups_shared = TRUE# AND bp.device_type <> 'DISK') OR
               (this_site_key = nvl(bp.site_key, this_site_key)))))
      GROUP BY bs.bs_key, bs.pieces, bp.device_type
      HAVING ((bitand(mask, dbms_rcvman.BSpartial_avail) = 0 AND
               count(DISTINCT piece#) = bs.pieces) OR
              (bitand(mask, dbms_rcvman.BSpartial_avail) <> 0 AND
               count(DISTINCT piece#) <= bs.pieces))
      ORDER BY 1,   -- bskey
               5;   -- code

   CURSOR loadRedundDf_c IS
      SELECT bs_key
      FROM (SELECT bs_key, file#
            FROM bdf, dbinc
            WHERE dbinc.db_key = this_db_key
              AND dbinc.dbinc_key = bdf.dbinc_key
              AND file# >= nvl(from_bsRec.dfNumber_obj, 0)
            UNION ALL
            SELECT bs_key, 0 file#
            FROM bcf, dbinc
            WHERE dbinc.db_key = this_db_key
              AND dbinc.dbinc_key = bcf.dbinc_key
              AND nvl(from_bsRec.dfNumber_obj, 0) = 0
            UNION ALL
            SELECT bs_key, -1 file#
            FROM bsf
            WHERE from_bsRec.dfNumber_obj IS NULL
              AND from_bsRec.fromSCN_act = 0
              AND bsf.db_key = this_db_key)
      GROUP BY bs_key
      ORDER BY min(file#), abs(bs_key - from_bsRec.bsKey_con);

   CURSOR loadRedundAl_c IS
      SELECT bs_key
      FROM (SELECT bs_key, thread#, sequence#
            FROM brl, dbinc
            WHERE dbinc.db_key = this_db_key
              AND dbinc.dbinc_key = brl.dbinc_key
              AND low_scn >= from_bsRec.logLowSCN_obj
              AND ((thread# = from_bsRec.logThread_obj AND
                    sequence# >= from_bsRec.logSequence_obj) OR
                   (thread# > from_bsRec.logThread_obj)))
      GROUP BY bs_key
      ORDER BY min(thread#), min(sequence#),
               abs(bs_key - from_bsRec.bsKey_con);

   CURSOR loadLocality_c(minbskey IN number, backupType IN varchar2) IS
      SELECT bs_key bskey
      FROM bs
      WHERE bs.db_key = this_db_key
        AND bs.bs_key >= loadLocality_c.minbskey
        AND (loadLocality_c.backupType IS NULL OR
             decode(bs.bck_type, 'L', 'L', 'D') = loadLocality_c.backupType)
      ORDER BY abs(bs_key - from_bsRec.bsKey_con);

   icopy      binary_integer := 0;
   bsrec      cacheBsRec_t;
   bsrow      cacheBsRecRow_t;
   prev_bskey number := 0;
   low_bskey  number;
   deviceindx binary_integer;
   addperset  binary_integer;   -- no. of entries added to cache per set
   bslist     numTab_t;
   addstatus  boolean;
   freec      number;
   backupType varchar2(1);
BEGIN
   deb(DEB_ENTER, 'loadBsRecCache');
   deb(DEB_IN, 'mixcopy=' || to_char(mixcopy));

   -- Bug-10377075: moved from resetBsRecCache
   IF (NOT cacheBsRecTable.initlimit AND this_db_key IS NOT NULL) THEN
      -- dynamically adjust the cache limit to the number of backupsets
      SELECT count(*) INTO cacheBsRecTable.limit
      FROM bs, dbinc
      WHERE dbinc.db_key = this_db_key   -- belongs to this database
        AND dbinc.db_key = bs.db_key;    -- join bs and dbinc
      IF (cacheBsRecTable.limit > bsRecCacheHighLimit) THEN
         cacheBsRecTable.limit := bsRecCacheHighLimit;
      ELSIF (cacheBsRecTable.limit < bsRecCacheLowLimit) THEN
         cacheBsRecTable.limit := bsRecCacheLowLimit;
      END IF;
      cacheBsRecTable.initlimit := TRUE;
   END IF;

   IF (mixcopy = FALSE#) THEN
      -- initialize the cache
      resetBsRecCache(TRUE);
      cacheBsRecTable.bsRec := bsrec;
      -- assign cache qualifiers
      cacheBsRecTable.tag        := tag;
      cacheBsRecTable.deviceType := deviceType;
      cacheBsRecTable.mask       := mask;
   ELSIF (cacheBsRecTable.mixcopy) THEN
      deb(DEB_EXIT, 'loadBsRecCache already loaded with mixcopy');
      RETURN;
   ELSE
      cacheBsRecTable.mixcopy := TRUE;
      FOR rec in loadBsRecCache_c LOOP
         deviceindx := setCachedDeviceType(rec.deviceType);
         --
         -- Loop through all loaded cache entries. If the cache contains a
         -- code which is less than the current one, then there is no need
         -- to duplicate this record in the cache, as we found a valid
         -- record on a deviceType.
         --
         <<mixCopyLoop>>
         FOR i in 1..255 LOOP
            BEGIN
               lkBsRecCache(bskey => rec.bskey, icopy => i, bsrec => bsrow);
               EXIT mixCopyLoop WHEN (bsrow.deviceindx = deviceindx AND
                                      bsrow.code < rec.code);
            EXCEPTION
               WHEN no_data_found THEN
                  addToBsRecCache(bskey      => rec.bskey,
                                  icopy      => i,
                                  deviceindx => deviceindx,
                                  tag        => rec.tag,
                                  copyNumber => rec.copyNumber,
                                  code       => rec.code);
                  EXIT mixCopyLoop;
            END;
         END LOOP;
      END LOOP;
      deb(DEB_EXIT, 'loadBsRecCache loaded with mixcopy');
      RETURN;
   END IF;

   -- First add the requested one.
   addstatus := addKeyToBsRecCache(bskey => from_bsRec.bsKey_con);

   -- Decide how much redundant cache information we must load depending
   -- on the user access pattern hint.
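   -- For illustration (hypothetical limit): with cacheBsRecTable.limit =
   -- 10000, the split below gives freec=10000 for redundantHint (all
   -- redundancy entries), freec=0 for localityHint (all locality entries),
   -- and freec=5000 otherwise (half redundancy, half locality).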
   IF (cacheBsRecTable.hint = redundantHint) THEN
      freec := cacheBsRecTable.limit;            -- only redundant
   ELSIF (cacheBsRecTable.hint = localityHint) THEN
      freec := 0;                                -- only locality
   ELSE
      freec := floor(cacheBsRecTable.limit/2);   -- redundant + locality
   END IF;

   -- Bulk collect records
   IF (freec != 0) THEN
      IF (from_bsRec.dfNumber_obj IS NOT NULL OR
          from_bsRec.fromSCN_act = 0) THEN
         deb(DEB_IN, 'loadRedundDf_c');
         OPEN loadRedundDf_c;
         FETCH loadRedundDf_c BULK COLLECT INTO bslist LIMIT freec;
         CLOSE loadRedundDf_c;
      ELSIF (from_bsRec.logLowSCN_obj IS NOT NULL) THEN
         deb(DEB_IN, 'loadRedundAl_c');
         OPEN loadRedundAl_c;
         FETCH loadRedundAl_c BULK COLLECT INTO bslist LIMIT freec;
         CLOSE loadRedundAl_c;
      END IF;
      -- Add it to the cache table
      FOR i in 1..bslist.count LOOP
         addstatus := addKeyToBsRecCache(bslist(i));
      END LOOP;
   END IF;

   freec := cacheBsRecTable.limit - bslist.count;
   bslist.delete;   -- free memory

   -- Now load the cache with hitlist entries
   FOR i in 1..cacheBsRecTable.hitlist.count LOOP
      IF (addKeyToBsRecCache(cacheBsRecTable.hitlist(i))) THEN
         freec := freec - 1;
      END IF;
      EXIT WHEN (freec <= 0);
   END LOOP;

   IF (cacheBsRecTable.minbskey = 0) THEN
      BEGIN
         SELECT nvl(min(bp.bs_key), 0) INTO cacheBsRecTable.minbskey
         FROM bp
         WHERE bp.db_key = this_db_key
           AND ((user_site_key = bp.site_key) OR
                (user_site_key IS NULL AND
                 ((disk_backups_shared = TRUE# AND
                   bp.device_type = 'DISK') OR
                  (tape_backups_shared = TRUE# AND
                   bp.device_type <> 'DISK') OR
                  (this_site_key = nvl(bp.site_key, this_site_key)))))
           AND ((mask = BSavailable AND bp.status = 'A') OR
                isStatusMatch(bp.status, mask) = TRUE#);
      EXCEPTION
         WHEN no_data_found THEN
            cacheBsRecTable.minbskey := 0;
      END;
   END IF;

   -- If we still have free entries, then load the cache with locality
   -- entries
   IF (freec > 0) THEN
      backupType := to_char(null);
      IF (cacheBsRecTable.hint = redundantHint) THEN
         -- Find the backup type. Incremental backups are treated as
         -- datafile backups.
         BEGIN
            SELECT decode(bck_type, 'L', 'L', 'D') INTO backupType
            FROM bs
            WHERE bs_key = from_bsRec.bsKey_con;
         EXCEPTION
            WHEN no_data_found THEN
               backupType := 'D';
         END;
      END IF;
      -- Bulk collect records
      OPEN loadLocality_c(cacheBsRecTable.minbskey, backupType);
      LOOP
         FETCH loadLocality_c BULK COLLECT INTO bslist LIMIT freec;
         FOR i in 1..bslist.count LOOP
            IF (addKeyToBsRecCache(bslist(i))) THEN
               freec := freec - 1;
            END IF;
         END LOOP;
         bslist.delete;   -- free memory
         EXIT WHEN (loadLocality_c%NOTFOUND OR freec <= 0);
      END LOOP;
      CLOSE loadLocality_c;
   END IF;

   -- Loop through the records to load the cache
   FOR rec in loadBsRecCache_c LOOP
      deviceindx := setCachedDeviceType(rec.deviceType);
      -- Does this match a previous bskey?
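      -- For illustration: loadBsRecCache_c orders rows by bskey, so three
      -- usable copies of backup set 42 arrive consecutively and are cached
      -- as icopy 1, 2, 3; the first row of backup set 43 then resets icopy
      -- to 1 below.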
      IF (prev_bskey = rec.bskey AND prev_bskey != 0) THEN
         icopy := icopy + 1;
      ELSE
         icopy := 1;   -- start with index 1 because this is a new set
      END IF;
      addToBsRecCache(bskey      => rec.bskey,
                      icopy      => icopy,
                      deviceindx => deviceindx,
                      tag        => rec.tag,
                      copyNumber => rec.copyNumber,
                      code       => rec.code);
      -- save the current bskey
      prev_bskey := rec.bskey;
   END LOOP;

   deb(DEB_IN, 'tag=' || nvl(cacheBsRecTable.tag, 'NULL') ||
       ' deviceType=' || nvl(cacheBsRecTable.deviceType, 'NULL') ||
       ' mask=' || to_char(mask));

   -- Dump cached buckets
   deb(DEB_IN, 'Cache contains ' || to_char(cacheBsRecTable.bsRec.count) ||
       ' buckets');
   --low_bskey := cacheBsRecTable.bsRec.FIRST;
   --WHILE low_bskey IS NOT NULL LOOP
   --   deb(DEB_IN, to_char(low_bskey));
   --   low_bskey := cacheBsRecTable.bsRec.NEXT(low_bskey);
   --END LOOP;
   deb(DEB_IN, 'Minimum bskey=' || to_char(cacheBsRecTable.minbskey));
   deb(DEB_EXIT);
END loadBsRecCache;

--------------------------- cacheFindValidBackupSet --------------------------

PROCEDURE cacheFindValidBackupSet(
   bsRec         IN rcvRec_t
  ,deviceType    IN varchar2 DEFAULT NULL
  ,tag           IN varchar2 DEFAULT NULL
  ,availableMask IN binary_integer) IS
BEGIN
   deb(DEB_PRINT,'cacheFindValidBackupSet:' ||
       ' bskey =' || to_char(bsRec.bsKey_con) ||
       ' tag=' || nvl(tag, 'NULL') ||
       ' deviceType=' || nvl(deviceType, 'NULL') ||
       ' mask=' || to_char(availableMask));
   IF (NOT hitBsRecCache(bskey      => bsRec.bsKey_con,
                         deviceType => deviceType,
                         tag        => tag,
                         mask       => availableMask)) THEN
      loadBsRecCache(from_bsRec => bsRec,
                     deviceType => deviceType,
                     tag        => tag,
                     mask       => availableMask,
                     mixcopy    => FALSE#);
      cacheBsRecTable.chit := cacheBsRecTable.chit + 1;
   END IF;
   findValidCacheRequest.bskey := bsRec.bsKey_con;
   findValidCacheRequest.icopy := 0;
END cacheFindValidBackupSet;

-------------------------- cacheGetValidBackupSet ----------------------------

FUNCTION cacheGetValidBackupSet(
   validBackupSetRec      OUT NOCOPY validBackupSetRec_t
  ,checkDeviceIsAllocated IN number DEFAULT FALSE#)
RETURN number IS
   local     validBackupSetRec_t;
   bsrec     cacheBsRecRow_t;
   nullbsrec rcvRec_t;
BEGIN
   <<nextRow>>
   findValidCacheRequest.icopy := findValidCacheRequest.icopy + 1;
   BEGIN
      lkBsRecCache(
         bskey => findValidCacheRequest.bskey,
         icopy => findValidCacheRequest.icopy,
         bsrec => bsrec);
   EXCEPTION
      WHEN no_data_found THEN
         -- If not index 1, then we reached the end of the fetch.
         IF (findValidCacheRequest.icopy != 1) THEN
            RAISE;
         END IF;
         IF (findValidCacheRequest.bskey < cacheBsRecTable.minbskey) THEN
            deb(DEB_PRINT, 'bskey < cacheBsRecTable.minbskey');
            RAISE;
         END IF;
         IF (NOT canMixCopy(bskey => findValidCacheRequest.bskey)) THEN
            RAISE;
         END IF;
         -- Haven't fetched one copy yet. Try mix and match of copy number.
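         -- For illustration: on the first probe (icopy=1) for a set whose
         -- pieces are complete only across copy numbers, the reload below
         -- with mixcopy=TRUE# re-populates the cache from the mixed-copy
         -- branches of loadBsRecCache_c, and the lookup is retried.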
         loadBsRecCache(from_bsRec => nullbsrec,
                        deviceType => cacheBsRecTable.deviceType,
                        tag        => cacheBsRecTable.tag,
                        mask       => cacheBsRecTable.mask,
                        mixcopy    => TRUE#);
         lkBsRecCache(
            bskey => findValidCacheRequest.bskey,
            icopy => findValidCacheRequest.icopy,
            bsrec => bsrec);
   END;

   -- Fill up the validBackupSetRec information
   local.deviceType := cacheBsRecTable.devicelist(bsrec.deviceindx);
   local.tag        := bsrec.tag;
   local.copyNumber := bsrec.copyNumber;
   local.code       := bsrec.code;

   IF (checkDeviceIsAllocated = TRUE#) THEN
      IF (anyDevice = FALSE# AND
          isDeviceTypeAllocated(local.deviceType) = FALSE#) THEN
         GOTO nextRow;
      END IF;
   END IF;

   validBackupSetRec := local;   -- set OUT mode arg
   deb(DEB_PRINT,'cacheGetValidBackupSet: returning valid rec deviceType=' ||
       local.deviceType || ' tag=' || local.tag ||
       ' copyNumber=' || to_char(local.copyNumber));
   RETURN TRUE#;
EXCEPTION
   WHEN no_data_found THEN
      RETURN FALSE#;
END cacheGetValidBackupSet;

------------------------------
-- Package State Validation --
------------------------------

-------------------------------- validateState --------------------------------

PROCEDURE validateState(
   anyCursor IN varchar2) IS
BEGIN
   deb(DEB_ENTER,'validateState');
   IF (this_db_key IS NULL) THEN
      raise_application_error(-20021, 'database not set');
   END IF;
   IF (this_dbinc_key IS NULL) THEN
      raise_application_error(-20020, 'Database incarnation not set');
   END IF;
   IF (translation_site_key IS NULL) THEN
      raise_application_error(-20082, 'Translation site key not set');
   END IF;
   IF (anyCursor IS NOT NULL) THEN
      raise_application_error(-20203, 'Translation already started');
   END IF;

   -- Initialize this_site_key using the current database's db_unique_name.
   -- Note that for 9i RMAN this_db_unique_name will be NULL.
   IF this_db_unique_name is NOT NULL AND this_site_key is NULL AND
      NOT this_dummy_instance THEN
      select site_key into this_site_key from node
      where db_unique_name = this_db_unique_name
        and db_key = this_db_key;
      deb(DEB_PRINT,'this_site_key=' || this_site_key);
   END IF;
   deb(DEB_EXIT,'validateState');
END;

---------------------
-- Query Filtering --
---------------------

-- Private procedure to find the highest SCN that is associated with a
-- timestamp less than or equal to timeStamp. This will give us an
-- approximate untilSCN which we can use in the WHERE clause of the queries.
--
-- Scanning of v$datafile is done so that the name translation cursors
-- can rely on just the scnVar in their where clause.
--
-- Offline ranges are scanned so that ones whose online checkpoint time
-- is at or after the from time are skipped.
-- Note that when computing the nearest SCN, we look at records from all
-- database sites in a DG environment, unlike filtering backups based on
-- file accessibility attributes. This is done to get the best approximation
-- of the SCN corresponding to a time, looking at all information in the
-- catalog.

----------------------------- computeUntilSCN -------------------------------

PROCEDURE computeUntilSCN(
   timeStamp IN date
  ,scn       OUT number
  ,allinc    IN number) IS
   mySCN number;
BEGIN
   deb(DEB_ENTER, 'computeUntilSCN');
   -- When converting a timestamp to an approximate SCN: since we never
   -- allow point-in-time recovery of the current incarnation to a previous
   -- incarnation, we should select only those records that belong to the
   -- current incarnation.
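   -- For illustration (hypothetical rows): if rlh holds low_scn=900 stamped
   -- 09:00 and low_scn=1000 stamped 10:00, then for a timeStamp of 10:30
   -- the query below contributes 1000; the same query template is repeated
   -- against al, bdf, bcf, cdf, ccf, xdf, df and offr, and the final answer
   -- is the greatest candidate found across all of them.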
SELECT nvl(max(rlh.low_scn),0) INTO mySCN FROM rlh, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE rlh.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (rlh.low_scn >= d2.reset_scn AND rlh.low_scn < d2.next_reset_scn)) AND rlh.low_time <= timeStamp; SELECT greatest(nvl(max(al.low_scn), 0), mySCN) INTO mySCN FROM al, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE al.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (al.low_scn >= d2.reset_scn AND al.low_scn < d2.next_reset_scn)) AND al.low_time <= timeStamp; SELECT greatest(nvl(max(bdf.ckp_scn),0), mySCN) INTO mySCN FROM bdf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE bdf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (bdf.ckp_scn >= d2.reset_scn AND bdf.ckp_scn < d2.next_reset_scn)) AND bdf.ckp_time <= timeStamp; SELECT greatest(nvl(max(bcf.ckp_scn),0), mySCN) INTO mySCN FROM bcf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE bcf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (bcf.ckp_scn >= d2.reset_scn AND bcf.ckp_scn < d2.next_reset_scn)) AND bcf.ckp_time <= timeStamp; SELECT greatest(nvl(max(cdf.ckp_scn),0), mySCN) INTO mySCN FROM cdf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE cdf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (cdf.ckp_scn >= d2.reset_scn AND cdf.ckp_scn < d2.next_reset_scn)) AND cdf.ckp_time <= timeStamp; SELECT greatest(nvl(max(cdf.rcv_fuzzy_scn),0), mySCN) INTO mySCN FROM cdf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE cdf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (cdf.rcv_fuzzy_scn >= d2.reset_scn AND cdf.rcv_fuzzy_scn < d2.next_reset_scn)) AND cdf.rcv_fuzzy_time <= timeStamp; SELECT greatest(nvl(max(ccf.ckp_scn),0), mySCN) INTO mySCN FROM ccf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE ccf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR 
d2.next_reset_scn IS NULL OR (ccf.ckp_scn >= d2.reset_scn AND ccf.ckp_scn < d2.next_reset_scn)) AND ccf.ckp_time <= timeStamp; SELECT greatest(nvl(max(xdf.ckp_scn),0), mySCN) INTO mySCN FROM xdf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE xdf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (xdf.ckp_scn >= d2.reset_scn AND xdf.ckp_scn < d2.next_reset_scn)) AND xdf.ckp_time <= timeStamp; SELECT greatest(nvl(max(xdf.rcv_fuzzy_scn),0), mySCN) INTO mySCN FROM xdf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE xdf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (xdf.rcv_fuzzy_scn >= d2.reset_scn AND xdf.rcv_fuzzy_scn < d2.next_reset_scn)) AND xdf.rcv_fuzzy_time<= timeStamp; SELECT greatest(nvl(max(df.create_scn), 0), mySCN) INTO mySCN FROM df, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE df.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (df.create_scn >= d2.reset_scn AND df.create_scn < d2.next_reset_scn)) AND df.create_time <= timeStamp; SELECT greatest(nvl(max(df.stop_scn), 0), mySCN) INTO mySCN FROM df, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE df.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (df.stop_scn >= d2.reset_scn AND df.stop_scn < d2.next_reset_scn)) AND df.stop_time <= timeStamp; SELECT greatest(nvl(max(offr.online_scn), 0), mySCN) INTO mySCN FROM offr, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE offr.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (offr.online_scn >= d2.reset_scn AND offr.online_scn < d2.next_reset_scn)) AND offr.online_time <= timeStamp; scn := mySCN; deb(DEB_EXIT, 'with '||to_char(scn)); END computeUntilSCN; ------------------------------ computeSpfileTime ------------------------------- PROCEDURE computeSpfileTime( inSCN IN number ,outTime OUT date ,allinc IN number ,estimated OUT boolean) IS startTime date; BEGIN deb(DEB_ENTER, 'computeSpfileTime'); outTime := NULL; -- Find matching restore point if one was in use. 
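   -- For illustration of the lookup below: an UNTIL SCN of n stops short of
   -- n itself, so a restore point created at SCN n is recorded with
   -- to_scn = inSCN - 1; the query matches that in both nrsp and grsp and
   -- takes the earliest recorded time.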
IF rpoint_set THEN estimated := FALSE; SELECT MIN(rtime) INTO outTime FROM (SELECT NVL(rsptime, creation_time) rtime FROM nrsp WHERE to_scn = inSCN - 1 UNION SELECT NVL(rsptime, creation_time) rtime FROM grsp WHERE to_scn = inSCN - 1); END IF; -- Use other objects to estimate a time IF outTime IS NULL THEN estimated := TRUE; startTime := to_date('01/01/1900','DD/MM/YYYY'); -- backup controlfile SELECT nvl(max(bs.start_time), startTime) INTO outTime FROM bcf,bs, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE bcf.dbinc_key = d2.dbinc_key AND bs.bs_key = bcf.bs_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (bcf.ckp_scn >= d2.reset_scn AND bcf.ckp_scn < d2.next_reset_scn)) AND bcf.ckp_scn <= inSCN; -- Not using ccf since controlfile copy does not maintain a start_time -- proxy copy controlfile SELECT greatest(nvl(max(xcf.start_time), startTime), outTime) INTO outTime FROM xcf, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc WHERE allinc = TRUE# START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE xcf.dbinc_key = d2.dbinc_key AND (allinc = FALSE# OR d2.next_reset_scn IS NULL OR (xcf.ckp_scn >= d2.reset_scn AND xcf.ckp_scn < d2.next_reset_scn)) AND xcf.ckp_scn <= inSCN; -- Verify we actually found some controlfile backups; IF startTime = outTime THEN outTime := NULL; END IF; END IF; -- Adjust the time to ensure we find spfiles backed up with controlfiles -- and adjust restore point times as they are TO not UNTIL. 
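-- Note: Oracle DATE arithmetic is in units of days, so 1/24/60/60 below is
-- exactly one second. For example (illustrative only):
--   to_date('01012011 120000','DDMMYYYY HH24MISS') + 1/24/60/60
--   => 01-JAN-2011 12:00:01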
outTime := outTime + 1/24/60/60;   -- Add one second
deb(DEB_EXIT, 'with ' || to_char(outTime, 'DD-MON-YY HH24:MI:SS'));
END computeSpfileTime;

----------------------------
-- Backup Set Translation --
----------------------------

-- Look up a backup set by primary key or recid/stamp

-------------------------------- findBackupSet --------------------------------

PROCEDURE findBackupSet(
  bsKey  IN number DEFAULT NULL
 ,recid  IN number DEFAULT NULL
 ,stamp  IN number DEFAULT NULL
 ,bsRec  OUT NOCOPY bsRec_t) IS
BEGIN
  deb(DEB_ENTER, 'findBackupSet');
  deb(DEB_IN, 'bsKey:'||nvl(bsKey, -1));
  IF (bsKey IS NOT NULL) THEN
    SELECT recid, stamp, bs_key, set_stamp, set_count, backup_type,
           incremental_level, elapsed_seconds, completion_time, status, pieces,
           decode(keep_options, 'LOGS'        , KEEP_LOGS
                              , 'NOLOGS'      , KEEP_NOLOGS
                              , 'BACKUP_LOGS' , KEEP_CONSIST
                              , KEEP_NO),
           keep_until, substr(multi_section, 1, 1)
      INTO bsRec
      FROM rc_backup_set
     WHERE db_key = this_db_key
       AND findBackupSet.bsKey = bs_key;
  ELSE
    SELECT recid, stamp, bs_key, set_stamp, set_count, backup_type,
           incremental_level, elapsed_seconds, completion_time, status, pieces,
           decode(keep_options, 'LOGS'        , KEEP_LOGS
                              , 'NOLOGS'      , KEEP_NOLOGS
                              , 'BACKUP_LOGS' , KEEP_CONSIST
                              , KEEP_NO),
           keep_until, substr(multi_section, 1, 1)
      INTO bsRec
      FROM rc_backup_set
     WHERE db_key = this_db_key
       AND findBackupSet.recid = recid
       AND findBackupSet.stamp = stamp;
  END IF;
  deb(DEB_EXIT);
EXCEPTION
  WHEN no_data_found THEN
    deb(DEB_EXIT, 'with error 20215');
    raise_application_error(-20215, 'Backup set is missing');
END findBackupSet;

---------------------------
-- Backup Set Validation --
---------------------------

-- Validate that a backup set has all pieces available according to the
-- specified arguments. Returns only 1 validBackupSetRec. There may be other
-- sets of pieces that match the criteria.

------------------------------ findValidBackupSet -----------------------------

PROCEDURE findValidBackupSet(
  bsKey          IN number
 ,pieceCount     IN number
 ,deviceType     IN varchar2 DEFAULT NULL
 ,tag            IN varchar2 DEFAULT NULL
 ,availableMask  IN binary_integer) IS
  bsRec rcvRec_t;
BEGIN
  IF (bsRecCacheEnabled) THEN
    bsRec.bsKey_con := bsKey;
    bsRec.pieceCount_con := pieceCount;
    cacheFindValidBackupSet(bsRec => bsRec, deviceType => deviceType,
                            tag => tag, availableMask => availableMask);
    RETURN;
  END IF;
  deb(DEB_ENTER, 'findValidBackupSet');
  IF (pieceCount = 1) THEN
    IF (findValidBackupSet1P_c%ISOPEN) THEN
      CLOSE findValidBackupSet1P_c;
    END IF;
    -- NOTE!! NOTE!! NOTE!!
    -- We are aware that the findValidBackupSet_c cursor doesn't scale
    -- when there are more backup pieces.
    -- So, for now, let's optimize the most common case, which is 1 piece
    -- in a backup set.
    deb(DEB_OPEN, 'findValidBackupSet1P_c');
    OPEN findValidBackupSet1P_c(bsKey => bsKey, pieceCount => pieceCount,
                                deviceType => deviceType, tag => tag,
                                mask => availableMask);
    getValidBackupSetCursor := 'findValidBackupSet1P_c';
  ELSE
    -- more than one piece exists in this set
    IF (findValidBackupSet_c%ISOPEN) THEN
      CLOSE findValidBackupSet_c;
    END IF;
    deb(DEB_OPEN, 'findValidBackupSet_c');
    OPEN findValidBackupSet_c(bsKey => bsKey, pieceCount => pieceCount,
                              deviceType => deviceType, tag => tag,
                              mask => availableMask);
    getValidBackupSetCursor := 'findValidBackupSet_c';
  END IF;
  deb(DEB_IN, 'bsKey=' || to_char(bsKey) || ' pieceCount=' ||
      to_char(pieceCount) || ' tag=' || nvl(tag, 'NULL'));
  deb(DEB_IN, ' deviceType=' || nvl(deviceType, 'NULL') ||
      ' mask=' || to_char(availableMask));
  getValidBackupSetLast.code := 99;   -- init for getValidBackupSet
  deb(DEB_EXIT);
END findValidBackupSet;

------------------------------ validateBackupSet ------------------------------

FUNCTION validateBackupSet(
  backupSetRec           IN rcvRec_t
 ,tag                    IN varchar2 DEFAULT NULL
 ,tagMatchRequired       IN boolean DEFAULT TRUE
 ,checkDeviceIsAllocated IN boolean DEFAULT TRUE
 ,availableMask          IN binary_integer
 ,validRec               OUT NOCOPY validBackupSetRec_t)
RETURN binary_integer IS
  findTag bp.tag%TYPE;
BEGIN
  deb(DEB_ENTER, 'validateBackupSet');
  IF (tagMatchRequired) THEN
    findTag := tag;
  ELSE
    -- Caller wants to know about backup sets even if they don't match
    -- the tag he is looking for, so don't do any tag filtering in the
    -- cursor.
    findTag := NULL;
  END IF;
  deb(DEB_IN, 'calling findValidBackupSet with:');
  deb(DEB_IN, ' tag=' || nvl(tag, 'NULL') || ' findTag=' ||
      nvl(findTag, 'NULL') || ' tagMatchRequired=' ||
      bool2char(tagMatchRequired) || ' checkDevice=' ||
      bool2char(checkDeviceIsAllocated) || ' availableMask=' ||
      to_char(availableMask));
  IF (bsRecCacheEnabled) THEN
    cacheFindValidBackupSet(bsRec => backupSetRec, tag => findTag,
                            availableMask => availableMask);
  ELSE
    findValidBackupSet(bsKey => backupSetRec.bsKey_con,
                       pieceCount => backupSetRec.pieceCount_con,
                       tag => findTag, availableMask => availableMask);
  END IF;
  deb(DEB_EXIT, 'with result from validateBackupSet0');
  RETURN validateBackupSet0(
           tag => tag,
           tagMatchRequired => tagMatchRequired,
           checkDeviceIsAllocated => checkDeviceIsAllocated,
           validRec => validRec);
END validateBackupSet;

------------------------------
-- Backup Piece Translation --
------------------------------

------------------------------- findBackupPiece -------------------------------

PROCEDURE findBackupPiece(
  bpKey      IN number DEFAULT NULL
 ,bsKey      IN number DEFAULT NULL
 ,tag        IN varchar2 DEFAULT NULL
 ,handle     IN varchar2 DEFAULT NULL
 ,deviceType IN varchar2 DEFAULT NULL
 ,copyNumber IN number DEFAULT NULL
 ,statusMask IN binary_integer DEFAULT BSavailable
 ,startBsKey IN number DEFAULT NULL) IS
BEGIN
  deb(DEB_ENTER, 'findBackupPiece');
  deb(DEB_IN, 'bpKey:'||nvl(bpKey, -1)|| ' and bsKey:'||nvl(bsKey, -1));
  validateState(getBackupPieceCursor);
  IF (bpKey IS NOT NULL) THEN
    deb(DEB_OPEN, 'findBackupPieceBpKey');
    OPEN findBackupPieceBpKey(bpKey => bpKey, tag => tag, handle => handle,
                              deviceType => deviceType,
                              copyNumber => copyNumber,
                              statusMask => statusMask);
    getBackupPieceCursor := 'findBackupPieceBpKey';
  ELSIF (bsKey IS NOT NULL) THEN
    deb(DEB_OPEN, 'findBackupPieceBsKey1');
    OPEN findBackupPieceBsKey1(bsKey => bsKey, tag => tag, handle => handle,
                               deviceType => deviceType,
                               copyNumber => copyNumber,
                               statusMask => statusMask);
    getBackupPieceCursor :=
    'findBackupPieceBsKey1';
  ELSIF (startBsKey IS NOT NULL) THEN
    OPEN findBackupPieceBsKey2(startBsKey => startBsKey, tag => tag,
                               statusMask => statusMask);
    getBackupPieceCursor := 'findBackupPieceBsKey2';
  ELSE
    deb(DEB_OPEN, 'findBackupPiece_c');
    OPEN findBackupPiece_c( tag => tag, handle => handle,
                            deviceType => deviceType,
                            copyNumber => copyNumber,
                            statusMask => statusMask);
    getBackupPieceCursor := 'findBackupPiece_c';
  END IF;
  -- Initialize all of the getBackupPiece variables to their default
  -- state.
  getBackupPieceNoRows.error := NULL;
  getBackupPieceDuplicates := TRUE#;
  getBackupPieceLast.pieceNumber := NULL;
  getBackupPieceDeviceType := deviceType;
  getBackupPieceExpectedPieces := NULL;
  getBackupPiecePieceCount := 0;
  getBackupPieceByHandle := FALSE;
  getBackupPieceAvailableMask := NULL;
  getBackupPieceSeekLast.bskey := NULL;
  getBackupPieceCopyNumber := NULL;
  getBackupPieceBskey := bsKey;
  deb(DEB_EXIT);
END findBackupPiece;

------------------------------------------
-- Compute Recovery Actions Subroutines --
------------------------------------------

---------------------------------- addAction ----------------------------------

FUNCTION addAction(                 -- add to the rcvRecStack
  actionIN IN rcvRec_t              -- if a backup set, we fill
                                    -- in the tag and deviceType
 ,partial_rcv   IN boolean
 ,isAncestor    IN boolean
 ,cf_scn        IN number DEFAULT NULL
 ,cf_cretime    IN date DEFAULT NULL
 ,cf_offrrid    IN number DEFAULT NULL
 ,allCopies     IN boolean DEFAULT FALSE
 ,doingRecovery IN boolean
 ,rmanCmd       IN binary_integer)
RETURN number IS
  dummy rcvRec_t;
  action rcvRec_t;
  validate_rc number;
  cf_count number;
  addRedo_rc number;
  chkact_rc number;
  tagMatchRequired boolean;
  validationRec validBackupSetRec_t;
  lowAction rcvRec_t;
  canrecover boolean;
  toSCN number;
BEGIN
  deb(DEB_ENTER, 'addAction');
  deb(DEB_IN, ' action.type_con='|| to_char(action.type_con));
  action := actionIN;   -- copy to local variable
  -- See if we have a gap that must be filled by redo. We must make this
  -- check first because we want to return OLD_REDO if that condition
  -- applies. We process actions in descending to_scn order, so it is OK to
  -- add a redo action to the stack even if we cannot apply this action
  -- because it fails validation or we don't have the right device type
  -- allocated. The next action we see would require at least this much
  -- redo. Note that this means we may have 2 adjacent redo actions on the
  -- stack. getRecoveryAction will merge them.
  IF (redoNeeded(action)) THEN
    -- We need to do a partial media recovery from this to_scn to
    -- get all the way to the lowAction's from_scn.
    -- N.B.:
    -- A clean range always belongs to the current incarnation since it
    -- comes from the controlfile and the controlfile defines the current
    -- incarnation. Therefore, if this action is a clean range then we
    -- must be searching the current incarnation. It is not possible for
    -- the recoveryActionCursor to return a clean range when searching
    -- recursively, even if we didn't know our parent incarnation. If
    -- the clean range does not span the resetlogs SCN, then its from_scn
    -- will be greater than the target_scn for any recursive search, and
    -- the cursor will filter out the clean range. If the clean range
    -- does span the resetlogs SCN, then the clean range's from_scn
    -- becomes the target SCN for the recursive searches, and the cursor
    -- will also filter it out. Therefore, we know that the lowAction
    -- must be from the current incarnation. So we can simply add a redo
    -- action to fill the gap between the clean range and the lowAction.
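    -- Illustrative example (hypothetical SCNs): if this action has
    -- toSCN_act = 500 and the lowAction already on the stack has
    -- fromSCN_act = 800, a redo action covering SCNs 500-800 is needed to
    -- connect the two; addRedo() below either stacks such a redo action
    -- or returns OLD_REDO when that redo cannot be applied (see the
    -- comments on the return codes below).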
    rcvRecGet(rcvRecStackState.lowAction, lowAction);
    addRedo_rc := addRedo(isAncestor, action.toSCN_act, action.rlgSCN_act,
                          lowAction, partial_rcv, doingRecovery);
    IF (addRedo_rc = action_OLD_REDO) THEN
      -- We cannot apply this action because there is an offline range
      -- in the future of this action that we must apply, and it would
      -- require redo to reach the offline range.
      deb(DEB_EXIT, 'with addRedo_rc: '||to_char(addRedo_rc));
      RETURN addRedo_rc;
    ELSE
      -- The other possible return codes from addRedo are
      -- OK, OLD_INC_REDO and FAIL. In any of these cases, we are allowed
      -- to add this action to the stack. The FAIL case is when partial_rcv
      -- is false, so addRedo just discards the actions previously
      -- stacked up to the save point, if any.
      NULL;
    END IF;
  ELSE   -- redo not needed
    -- If the lowAction is an offline range, then this action must
    -- have a to_scn <= the offline range start SCN.
    -- We don't actually need to check this though. The cursor looks at
    -- only those actions with a to_scn <= the target_scn.
    NULL;
  END IF;
  -- If we get any backupsets on an orphan branch, or ones not belonging to
  -- a known incarnation, ignore them.
  chkact_rc := CheckRecAction(action);
  IF (chkact_rc = action_SKIP) THEN
    deb(DEB_EXIT, 'with action_SKIP');
    RETURN action_SKIP;
  END IF;
  -- If this is a backup set, validate that it is available and we have the
  -- right device type allocated to restore/list it, and also check that it
  -- is available on a set of pieces with the right tag if a restoreTag has
  -- been specified.
  IF (action.type_con = backupSet_con_t) THEN
    IF (computeRA_allRecords = TRUE# OR restoreTag IS NULL) THEN
      tagMatchRequired := FALSE;
    ELSE
      tagMatchRequired := TRUE;
    END IF;
    IF (rmanCmd = rcvCopyCmd_t) THEN
      -- For the recover copy command, the tag specification applies to
      -- copies and not to incremental backups.
      tagMatchRequired := FALSE;
    END IF;
    -- Validate this backup set. This checks to see that a complete set of
    -- pieces is available on the same device type. We also discover
    -- if the pieces are all available with the same tag, and if so, from
    -- the same copy#. These values are then stuffed into the rcvRec.
    -- It is possible that a complete set of pieces is available on multiple
    -- device types if the backupset has been copied. In this case, the
    -- other device types are ignored. The ordering is simply the collating
    -- sequence order for the deviceType (alphabetic). Since "DISK"
    -- comes before "SBT_TAPE", this has the effect of preferring backupsets
    -- on disk over those on tape, which is desired. It would be a better
    -- implementation to have a preference column in the bp table, but
    -- that is not really needed until we have something other than
    -- SBT_TAPE and DISK.
    --
    -- If we are really performing a restore, then the underlying query
    -- (findValidBackupSet) will be run twice, once from here and then again
    -- later when RMAN obtains the backup pieces for this set. This is
    -- unfortunate, but unavoidable. This is the reason why the copy#,
    -- tag and deviceType are stuffed into the rcvRec, so that the later
    -- call to get the pieces can use those values in the query, which may
    -- improve its performance. Also note that if this backupset is used
    -- to restore multiple datafiles, then we validate it once for each
    -- datafile. It may be a good idea to keep a list of backup set keys
    -- that have been validated to improve performance when many datafiles
    -- are being restored.
    --
    -- For RMAN commands such as LIST, the tag, deviceType, and copy#s will
    -- be ignored.
    -- As of 8.1.6, those commands use the findValidBackupSet
    -- procedure directly to obtain complete information about the
    -- availability of the backup set.
    IF (rmanCmd != obsoleteCmd_t) THEN
      validate_rc := validateBackupSet(backupSetRec => action,
                                       tag => restoreTag,
                                       tagMatchRequired => tagMatchRequired,
                                       checkDeviceIsAllocated => TRUE,
                                       availableMask => computeRA_availableMask,
                                       validRec => validationRec);
    ELSE
      -- We have already validated the backupset in the rcvRecCursor1_c
      -- cursor for the obsolete command.
      validate_rc := SUCCESS;
    END IF;
    IF (validate_rc = dbms_rcvman.UNAVAILABLE) THEN
      deb(DEB_EXIT, '(backup set is unavailable) with: action_FAIL');
      RETURN action_FAIL;
    ELSIF (validate_rc = dbms_rcvman.AVAILABLE) THEN
      -- We cannot restore/list this backup set because we don't have the
      -- right device type allocated. We want to remember this fact
      -- so that we can give the user a meaningful error message.
      deb(DEB_IN,'dont have required device type');
      -- For the recover copy command, computeRecoveryActions does not
      -- terminate until an image copy is seen. So, we have to remember
      -- that at least one needed backupset exists on an unallocated
      -- device type, and use that to initialize computeRA_available when
      -- an image copy is seen.
      IF (rmanCmd = rcvCopyCmd_t) THEN
        computeRA_rcvCopy_avail := TRUE;
      ELSE
        computeRA_available := TRUE;
      END IF;
      deb(DEB_EXIT, 'returning FAIL');
      RETURN action_FAIL;
    END IF;
    --
    -- SUCCESS initialization is done later. See a few lines below...
    --
  ELSIF (action.type_con = proxyCopy_con_t) THEN
    -- Check that we have the right deviceType allocated.
    IF (anyDevice = FALSE# AND
        isDeviceTypeAllocated(action.deviceType_con) = FALSE#) THEN
      -- We cannot restore/list this backup set because we don't have the
      -- right device type allocated. We want to remember this fact
      -- so that we can give the user a meaningful error message.
      IF (rmanCmd = rcvCopyCmd_t) THEN
        computeRA_rcvCopy_avail := TRUE;
      ELSE
        computeRA_available := TRUE;
      END IF;
      deb(DEB_EXIT, '(dont have required device type for proxy)'||
          ' with: action_FAIL');
      RETURN action_FAIL;
    END IF;
  ELSIF (action.type_con = imageCopy_con_t) THEN
    -- Check that we have a disk device allocated.
    -- But, allow this for recover copy because it always needs a copy,
    -- which can only exist on disk.
    IF (rmanCmd != rcvCopyCmd_t AND not diskDevice) THEN
      -- We don't have a disk device allocated, so we cannot
      -- use a datafile copy.
      computeRA_available := TRUE;
      deb(DEB_EXIT, '(dont have required device type for imagecopy)'||
          ' with: action_FAIL');
      RETURN action_FAIL;
    END IF;
    -- If about to stack an action for the recover copy command, then
    --   o check that at least one incremental that can be applied
    --     is available on an allocated device type.
    IF (rmanCmd = rcvCopyCmd_t) THEN
      canrecover := FALSE;
      computeRA_available := computeRA_rcvCopy_avail;
      toSCN := action.toSCN_act;
      deb(DEB_IN,'rcvCopyCmd count= ' || rcvRecStack.count ||
          ' lowAction= ' || rcvRecStackState.lowAction ||
          ' toSCN= ' || toSCN);
      IF (rcvRecStack.count > 0) THEN
        FOR i IN REVERSE 1..rcvRecStackState.lowAction LOOP
          rcvRecGet(i, dummy);   -- bug 7268955
          IF (dummy.type_act = incremental_act_t OR
              dummy.type_con = offlineRangeRec_con_t) THEN
            -- Action cannot be applied on the copy when
            -- dc checkpoint_change# < fromSCN.
            -- No more actions to scan as actions are stacked
            -- in toSCN order.
            EXIT WHEN (toSCN < dummy.fromSCN_act);
            -- this action can be applied on the image copy
            IF (dummy.type_con = offlineRangeRec_con_t) THEN
              toSCN := dummy.toSCN_act;
              deb(DEB_IN,'extending toSCN= ' || toSCN);
            ELSE
              -- this is an incremental
              IF (anyDevice = TRUE# OR
                  isDeviceTypeAllocated(dummy.deviceType_con)=TRUE#) THEN
                -- done as this incremental is available
                deb(DEB_IN,'canrecover is TRUE - 2');
                canrecover := TRUE;
                computeRA_available := NULL;
                EXIT;
              ELSE
                -- Remember that the incremental is available on another
                -- device, and check whether the next action can be
                -- applied on the image copy.
                deb(DEB_IN,'canrecover is FALSE');
                canrecover := FALSE;
                computeRA_available := TRUE;
              END IF;
            END IF;
          ELSIF (debug) THEN
            deb(DEB_IN,'rcvCopyCmd skipping');
            printRcvRec(dummy);
          END IF;
        END LOOP;
      END IF;
      -- If there are no valid incrementals/offline ranges stacked,
      -- then reset the stack and check for the next incrementals/offline
      -- ranges and image copy.
      IF not canrecover THEN
        resetrcvRecStack;
        deb(DEB_EXIT, 'no valid incrementals with: action_FAIL');
        RETURN action_FAIL;
      END IF;
    END IF;
  ELSIF (action.type_con = offlineRangeRec_con_t AND
         action.type_act = offlineRange_act_t) THEN
    -- First see if this offline range is in the current controlfile.
    -- If so, we are cool. If not, then check that there is at least one
    -- controlfile copy that contains this offline range.
    -- Note that if a controlfile contains no kccor records, then
    -- the oldest recid will be zero. Both krmk.pc and krbc.c obey
    -- this convention.
    IF (cf_cretime = action.cfCreationTime_con AND
        action.recid_con >= cf_offrrid AND
        cf_offrrid > 0 AND            -- contains at least 1 record
        cf_scn >= action.toSCN_act) THEN
      NULL;   -- range is in current cf, we're cool
    ELSE
      SELECT count(*) INTO cf_count FROM ccf
       WHERE ccf.create_time = action.cfCreationTime_con
         AND ccf.min_offr_recid <= action.recid_con
         AND ccf.ckp_scn >= action.toSCN_act
         AND ((user_site_key = ccf.site_key) OR
              (user_site_key IS NULL AND
               ((disk_backups_shared = TRUE#) OR
                (this_site_key = nvl(ccf.site_key, this_site_key)))))
         AND ccf.min_offr_recid > 0;   -- contains at least 1 record
      IF (cf_count = 0) THEN
        deb(DEB_EXIT, '(no controlfile copy with offline range)'||
            ' with: action_FAIL');
        RETURN action_FAIL;
      END IF;
    END IF;
  END IF;
  -- If old-incarnation redo is required followed by the current action,
  -- trim the stack, as all the old actions up to the savepoint are useless
  -- for recovery.
  IF (addRedo_rc = action_OLD_INC_REDO) THEN
    rcvRecStackState.lowAction := 0;
    rcvRecStack.trim(rcvRecStack.last -
                     greatest(rcvRecStackState.savePoint,
                              rcvRecStackState.top));
    deb(DEB_IN,'trimming stack, rcvRecStackCount='|| rcvRecStack.count);
  END IF;
  <<addAnother>>
  -- A backupset fetched for obsoleteCmd_t is validated already in the
  -- cursor because it is faster and the obsolete command is not interested
  -- in (tag, deviceType, copyNumber).
  IF (validate_rc = SUCCESS AND action.type_con = backupSet_con_t AND
      rmanCmd != obsoleteCmd_t) THEN
    ------------SUCCESS initialization for backupSet type-------------
    -- If computeRA_allRecords is true and restoreTag is not
    -- null, then we set tagMatchRequired to false, so it is possible
    -- that this backupset did not validate with the right tag. Check
    -- that the tag in the validationRec matches the restoreTag. If not,
    -- then leave the tag_con field in the action record null. This will
    -- cause this action to be skipped when getRecoveryAction is called.
    IF (validationRec.tag = restoreTag OR restoreTag IS NULL) THEN
      action.tag_con := validationRec.tag;
    END IF;
    action.deviceType_con := validationRec.deviceType;
    action.copyNumber_con := validationRec.copyNumber;
  END IF;
  IF (debug) THEN
    printRcvRec(action);
  END IF;
  IF (action.type_act = full_act_t) THEN
    IF (thisBackupAge < rcvRecBackupAge) THEN
      --
      -- We can only be here when doing restore failovers. The latest
      -- backup was tried for restore and didn't succeed, so rcvRecBackupAge
      -- was incremented and a restore failover was done. This means we
      -- need to assume that all earlier backups are not usable and not
      -- needed for restore.
      --
      --
      -- If this full action is not an interesting source, then why
      -- stack it?
      --
      -- IF (NOT isValidAction(action)) THEN
      --    deb(DEB_EXIT, 'with: action_SKIP');
      --    RETURN action_SKIP;
      -- END IF;
      --
      -- This full action was tried already but didn't succeed in
      -- restore. So, let's assume that the full backup is not available
      -- for restore.
      --
      deb(DEB_IN, 'skipping action because thisBackupAge (' ||
          thisBackupAge || ') < rcvRecBackupAge(' || rcvRecBackupAge || ')');
      thisBackupAge := thisBackupAge + 1;
      deb(DEB_EXIT, 'with: action_SKIP');
      RETURN action_SKIP;
    END IF;
    -- If we have already stacked the required full backups, then skip this.
    IF (rcvRecStackState.fullBackups >= computeRA_fullBackups) THEN
      -- Any backups with keep_options must be kept on the stack while
      -- processing obsoleteCmd_t.
      IF (rmanCmd = obsoleteCmd_t AND action.keep_options = 0) THEN
        deb(DEB_IN, 'skipping action because this action has NO KEEP');
        deb(DEB_EXIT, 'with action_SKIP');
        RETURN action_SKIP;
      ELSIF (rmanCmd != obsoleteCmd_t) THEN
        deb(DEB_IN, 'skipping action because stack has enough fullBackups');
        deb(DEB_EXIT, 'with action_SKIP');
        RETURN action_SKIP;
      END IF;
    END IF;
  END IF;
  -- I think this is a sheer waste of memory/CPU for the obsolete command.
  IF (rmanCmd = obsoleteCmd_t and NOT isValidAction(action)) THEN
    deb(DEB_EXIT, 'with action_SKIP');
    RETURN action_SKIP;
  END IF;
  rcvRecPush(action);   -- add record for this action
  deb(DEB_IN, ' Added action:');
  IF (allCopies AND action.type_con = backupSet_con_t) THEN
    -- Keep calling validateBackupSet0 until it fails to return SUCCESS.
    deb(DEB_IN,'allCopies is TRUE, trying to add other copies');
    validate_rc := validateBackupSet0(tag => restoreTag,
                                      tagMatchRequired => tagMatchRequired,
                                      checkDeviceIsAllocated => TRUE,
                                      validRec => validationRec);
    IF (validate_rc <> SUCCESS) THEN
      GOTO done;
    END IF;
    GOTO addAnother;
  END IF;
  <<done>>
  -- Update the lowAction pointer and the savePoint pointer if necessary.
  IF (action.type_act = full_act_t) THEN
    -- new full means new savePoint
    deb(DEB_IN, ' action.type_act is range/full => setting savePoint='||
        to_char(rcvRecStack.last));
    rcvRecStackState.savePoint := rcvRecStack.last;
    -- Bump fullBackups counter.
    -- For obsoleteCmd count only non-keep backups.
    IF (rmanCmd != obsoleteCmd_t OR action.keep_options = KEEP_NO) THEN
      rcvRecStackState.fullBackups := rcvRecStackState.fullBackups + 1;
    END IF;
  ELSIF (rcvRecStackState.lowAction = 0) THEN
    -- no current lowAction, so set it
    rcvRecStackState.lowAction := rcvRecStack.last;
  ELSIF (action.fromSCN_act <
         rcvRecStack(rcvRecStackState.lowAction).fromSCN_act) THEN
    rcvRecStackState.lowAction := rcvRecStack.last;   -- new lowAction
  END IF;
  deb(DEB_EXIT, 'with: action_OK');
  RETURN action_OK;
END addAction;

---------------------------- computeRecoveryActions2 --------------------------

-- This function is called by the main computeRecoveryActions function, and
-- also calls itself recursively.
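-- Illustrative call pattern (hypothetical keys and SCNs): the top-level
-- call searches the current incarnation, say dbinc_key = 100. If the
-- search hits an offline range that starts before that incarnation's
-- resetlogs SCN, the function calls itself with the parent incarnation
-- (say dbinc_key = 90) and with target_scn set to the offline range
-- start SCN, and so on up the incarnation chain.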
FUNCTION computeRecoveryActions2(
  fno IN number               -- Datafile number.
 ,crescn IN number            -- Datafile creation SCN.
 ,cretime IN date             -- Datafile creation time.
 ,df_rlgscn IN number         -- Datafile resetlogs SCN.
                              -- Null if this is a RESTORE or LIST, else this
                              -- is the value in the datafile header for the
                              -- datafile we are RECOVERing.
 ,df_rlgtime IN date          -- Datafile resetlogs time.
                              -- Null if df_rlgscn is null, else value from
                              -- datafile header.
 ,df_ckpscn IN number         -- Datafile checkpoint SCN.
                              -- Null if df_rlgscn is null, else value from
                              -- datafile header.
 ,offlscn IN number           -- kccfeofs (may be 0).
 ,onlscn IN number            -- kccfeonc (0 if offlscn is 0).
 ,onltime IN date             -- kccfeonc_time (ignored if kccfeofs is 0)
 ,cleanscn IN number          -- kccfecps if either SOR or WCC set, else 0.
 ,clean2scn IN number         -- CF ckpt SCN if WCC set.
                              -- Infinity if SOR bit set, else 0.
 ,clean2time IN date          -- cf_checkpoint_time if WCC,
                              -- SYSDATE if SOR bit set.
 ,allowfuzzy IN boolean       -- TRUE if can be fuzzy at until SCN/time,
                              -- FALSE if not.
 ,partial_rcv IN boolean
 ,target_scn IN number        -- This is the SCN to which we want to recover
                              -- the datafile. Null unless we call
                              -- ourselves recursively to search a prior
                              -- incarnation or doing a BMR on standby.
                              -- We search prior incarnations
                              -- only when there is an offline range
                              -- spanning the resetlogs SCN. So this SCN is
                              -- really the offline range start SCN.
                              -- For BMR on standby, we search backups older
                              -- than the current df_ckpscn. So, this SCN is
                              -- really the df_ckpscn for BMR on standby.
 ,dbinc_key IN number         -- The key of the database incarnation to
                              -- search in. This may be different from
                              -- df_rlgscn, and it may be different from the
                              -- package var "this_dbinc_key".
 ,cf_scn IN number
 ,cf_cretime IN date          -- controlfile creation time.
                              -- NULL if none mounted.
 ,cf_offrrid IN number        -- recid of oldest offline range in controlfile
                              -- NULL if none mounted.
 ,test_search IN boolean      -- if TRUE, then we have called ourselves
                              -- recursively and are not sure this is an
                              -- ancestral incarnation
 ,done IN OUT boolean         -- set to TRUE if successful. (IN mode so
                              -- we can test it).
 ,allCopies IN boolean        -- if true, then stack all copies of
                              -- a backup set if it was duplexed or copied.
 ,recover IN boolean
 ,rmanCmd IN binary_integer
 ,foreignDbid IN number
 ,pluggedRonly IN binary_integer   -- 1 => readonly, 0 => readwrite
 ,pluginSCN IN number
 ,pluginRlgSCN IN number
 ,pluginRlgTime IN date
 ,creation_thread IN number
 ,creation_size IN number)
-- NOTES:
-- If allCopies is FALSE, then we should at least count the number of copies
-- of a backup set in addAction and save this number in the rcvRec. This
-- would tell us how many copies of a backupset there are, which would be
-- useful for RMAN to know. For example, if the customer wanted redundancy=2
-- but there was only 1 copy of an incremental backup, then this incremental
-- could not be considered as a substitute for archivelogs.
RETURN boolean IS
  ---------------------
  -- LOCAL VARIABLES --
  ---------------------
  null_action rcvRec_t;
  action rcvRec_t;               -- current row
  lastAction rcvRec_t;
  parentDbincKey number;         -- my parent dbinc's key
  dbinc_rlgscn number;           -- resetlogs scn for dbinc_key
  dbinc_rlgtime date;            -- resetlogs time for dbinc_key
  CURSOR dbinc_cursor(db_key number, rstscn number) IS
    SELECT dbinc_key FROM dbinc
     WHERE dbinc.db_key = dbinc_cursor.db_key
       AND dbinc.reset_scn < dbinc_cursor.rstscn;
  dbinc_row dbinc_cursor%ROWTYPE;
  savedrcvRecStackState rcvRecStackState_t;
  addAction_rc number;           -- return code
  addRedo_rc number;             -- return code
  isAncestor boolean;            -- TRUE if we find an action we could
                                 -- apply with to_scn = target_scn
  rc boolean;                    -- return code from recursive search
                                 -- of possible parent incarnation
  done_flag boolean;             -- for use in recursive calls
  doingRecovery boolean;         -- doing RECOVER
  stack_df_rlgscn number;
  savedBackupAge number;
BEGIN
  deb(DEB_ENTER, 'computeRecoveryActions2');
  done := FALSE;
  doingRecovery := recover;
  IF (doingRecovery is null) THEN
    -- the call is from an RMAN release prior to 9.0.2;
    -- we could be doing either recover or restore
    IF (df_rlgscn is not null) THEN
      -- doing RECOVER
      doingRecovery := TRUE;
    ELSE
      doingRecovery := FALSE;
    END IF;
  END IF;
  IF (doingRecovery) THEN
    -- doing RECOVER
    deb(DEB_IN, ' Doing recovery.');
  ELSE
    deb(DEB_IN, ' Not doing recovery.');
  END IF;
  -- Compute this dbinc's resetlogs SCN and time
  --
  SELECT reset_scn, reset_time INTO dbinc_rlgscn, dbinc_rlgtime
    FROM dbinc
   WHERE dbinc.dbinc_key = computeRecoveryActions2.dbinc_key;
  IF (doingRecovery AND canApplyAnyRedo = FALSE# ) THEN
    -- doing RECOVER using an RMAN older than 10i
    -- Check that this incarnation is reasonable for the
    -- datafile we are trying to recover. We only want to look at
    -- incarnations with a resetlogs SCN >= the datafile's resetlogs SCN.
    IF (dbinc_rlgscn < df_rlgscn) THEN
      deb(DEB_IN, 'dbinc_rlgscn < df_rlgscn (' || to_char(dbinc_rlgscn) ||
          ' < ' || to_char(df_rlgscn) || ')');
      deb(DEB_EXIT,'computeRecoveryActions2 - 1 with FALSE');
      RETURN FALSE;
    ELSIF (dbinc_rlgscn = df_rlgscn AND dbinc_rlgtime <> df_rlgtime) THEN
      deb(DEB_IN, 'dbinc_rlgtime <> df_rlgtime');
      deb(DEB_EXIT,'computeRecoveryActions2 - 2 with FALSE');
      RETURN FALSE;
    END IF;
  END IF;
  IF (not test_search) THEN
    -- This is not a test search of a prior incarnation, therefore
    -- we can set this flag right away.
    deb(DEB_IN, ' This is ancestor.');
    isAncestor := TRUE;
  ELSE
    isAncestor := FALSE;
  END IF;
  openRecoveryActionCursor(
     dbincKey      => dbinc_key
    ,fno           => fno
    ,creSCN        => crescn
    ,dfCkpSCN      => df_ckpscn
    ,dbincRlgSCN   => dbinc_rlgscn
    ,dbincRlgTime  => dbinc_rlgtime
    ,offlSCN       => offlscn
    ,onlSCN        => onlscn
    ,onlTime       => onltime
    ,cleanSCN      => cleanscn
    ,clean2SCN     => clean2scn
    ,clean2Time    => clean2time
    ,targetSCN     => target_scn
    ,rmanCmd       => rmanCmd
    ,foreignDbid   => foreignDbid
    ,pluggedRonly  => pluggedRonly
    ,pluginSCN     => pluginSCN
    ,pluginRlgSCN  => pluginRlgSCN
    ,pluginRlgTime => pluginRlgTime);
  ----------------------------------------
  -- Process the rows from rcvRecCursor --
  ----------------------------------------
  <<action_loop>>
  LOOP
    IF (fetchRecoveryAction(
           dbincKey      => dbinc_key
          ,fno           => fno
          ,creSCN        => crescn
          ,dfCkpSCN      => df_ckpscn
          ,dbincRlgSCN   => dbinc_rlgscn
          ,dbincRlgTime  => dbinc_rlgtime
          ,offlSCN       => offlscn
          ,onlSCN        => onlscn
          ,onlTime       => onltime
          ,cleanSCN      => cleanscn
          ,clean2SCN     => clean2scn
          ,clean2Time    => clean2time
          ,targetSCN     => target_scn
          ,action        => action
          ,rmanCmd       => rmanCmd
          ,foreignDbid   => foreignDbid
          ,pluggedRonly  => pluggedRonly
          ,pluginSCN     => pluginSCN
          ,pluginRlgSCN  => pluginRlgSCN
          ,pluginRlgTime => pluginRlgTime)) THEN
      deb(DEB_PRINT, 'fetched recovery action');
      printRcvRec(action);
      -----------------------------
      -- Compute isAncestor flag --
      -----------------------------
      IF (bitand(action.type_con, backupMask_con_t) > 0 AND
          action.toSCN_act = target_scn) THEN
        deb(DEB_IN, ' This is ancestor.');
        isAncestor := TRUE;
      END IF;
      IF (action.type_con = offlineRangeRec_con_t) THEN
        -------------------
        -- OFFLINE RANGE --
        -------------------
        deb(DEB_IN, ' found an offline range' ||
            ' from=' || to_char(action.fromSCN_act) ||
            ' to=' || to_char(action.toSCN_act));
        IF (action.type_act = spanningRange_act_t) THEN
          ------------------------------------------------
          -- OFFLINE RANGE spanning multiple resetlogs  --
          ------------------------------------------------
          deb(DEB_IN, ' offline range started before this resetlogs SCN');
          -- We don't need to add this action to the stack. Just set
          -- addAction_rc to action_OK so that we will take the
          -- right path below and search our parent incarnation.
          addAction_rc := action_OK;
        ELSE
          addAction_rc := addAction(actionIN => action,
                                    partial_rcv => partial_rcv,
                                    isAncestor => isAncestor,
                                    cf_scn => cf_scn,
                                    cf_cretime => cf_cretime,
                                    cf_offrrid => cf_offrrid,
                                    doingRecovery => doingRecovery,
                                    rmanCmd => rmanCmd);
        END IF;
      ELSIF (action.type_con = backupSet_con_t AND
             action.type_act = incremental_act_t) THEN
        ----------------------------
        -- INCREMENTAL BACKUP SET --
        ----------------------------
        deb(DEB_IN, 'found an incremental backup set');
        addAction_rc := addAction(actionIN => action,
                                  partial_rcv => partial_rcv,
                                  isAncestor => isAncestor,
                                  allCopies => allCopies,
                                  doingRecovery => doingRecovery,
                                  rmanCmd => rmanCmd);
      ELSIF (action.type_act = full_act_t) THEN
        ------------------------------------------------------------
        -- DATAFILE COPY or FULL or LEVEL 0 BACKUP SET/PROXY COPY --
        ------------------------------------------------------------
        deb(DEB_IN, 'found a copy/full/level0/proxy copy');
        IF (doingRecovery) THEN
          -- if doing a RECOVER
          -- Datafile copies and full backups are not interesting when
          -- doing a RECOVER. So don't stack this action, but do set the
          -- computeRA_restorable flag if we know this
          -- incarnation is an ancestor of the previous one or this is
          -- the current incarnation, and we can apply redo to this full
          -- backup if it would need some.
          IF (rmanCmd = rcvCopyCmd_t) THEN
            addAction_rc := addAction(actionIN => action,
                                      partial_rcv => partial_rcv,
                                      isAncestor => isAncestor,
                                      allCopies => allCopies,
                                      doingRecovery => doingRecovery,
                                      rmanCmd => rmanCmd);
          ELSE
            IF (isAncestor) THEN
              rcvRecTop(lastAction);
              IF (not redoNeeded(action) OR
                  canAddRedo(isAncestor, action.toSCN_act,
                             action.rlgSCN_act, lastAction,
                             partial_rcv, doingRecovery) <>
                  action_OLD_REDO) THEN
                computeRA_restorable := TRUE;
              END IF;
            END IF;
            addAction_rc := action_SKIP;
          END IF;
        ELSE
          -- not doing a RECOVER
          addAction_rc := addAction(actionIN => action,
                                    partial_rcv => partial_rcv,
                                    isAncestor => isAncestor,
                                    allCopies => allCopies,
                                    doingRecovery => doingRecovery,
                                    rmanCmd => rmanCmd);
        END IF;
      ELSE
        -------------------------
        -- UNKNOWN ACTION KIND --
        -------------------------
        deb(DEB_IN, 'unknown container type: ' || to_char(action.type_con) ||
            ' or action type: ' || to_char(action.type_act));
        -- We should signal an internal error here, but we don't have
        -- that ability from PL/SQL. So just raise 20999.
        deb(DEB_EXIT, 'with error 20999');
        raise_application_error(-20999, 'unknown action kind');
      END IF;   -- "switch" on ACTION.KIND
      ---------------------------------------------
      -- Handle the return code from addAction() --
      ---------------------------------------------
      deb(DEB_IN, 'addAction returned code ' || to_char(addAction_rc));
      IF (addAction_rc = action_OK) THEN
        -- the action was added
        --------------------------------------
        -- Check for terminating conditions --
        --------------------------------------
        IF (rcvRecStackState.savePoint > 0 AND
            computeRA_allRecords = FALSE# AND
            rcvRecStackState.fullBackups >= computeRA_fullBackups) THEN
          -- If we now have a fullKind action and we are not looking
          -- for all records, then we are done. Note that this must
          -- be a RESTORE because savePoint is never non-zero if we are
          -- doing RECOVER because fullKind actions are discarded
          -- by RECOVER.
          deb(DEB_IN, 'savePoint > 0' || ' and computeRA_allRecords = FALSE#');
          done := TRUE;
          deb(DEB_PRINT,'done set to true - 1');
          EXIT action_loop;
        END IF;
        IF (doingRecovery) THEN
          -- if doing RECOVER
          IF (action.type_con = offlineRangeRec_con_t) THEN
            -- For offline ranges, the datafile must be checkpointed
            -- precisely at the offline range start SCN.
            IF (df_ckpscn = action.fromSCN_act) THEN
              done := TRUE;
              deb(DEB_PRINT,'done set to true - 2');
              EXIT action_loop;
            END IF;
          ELSE
            -- For any other action, it is sufficient if the datafile
            -- is checkpointed >= the action's from_scn.
            IF (df_ckpscn >= action.fromSCN_act) THEN
              done := TRUE;
              deb(DEB_PRINT,'done set to true - 3');
              EXIT action_loop;
            END IF;
          END IF;
        END IF;
        ----------------------------------------
        -- Offline Range Spanning a Resetlogs --
        ----------------------------------------
        -- If the action was an offline range and this offline range
        -- started before the current resetlogs, then we must search the
        -- previous incarnation. This can happen only when canApplyAnyRedo
        -- is FALSE# for this catalog version.
        IF (action.type_con = offlineRangeRec_con_t AND
            action.fromSCN_act < dbinc_rlgscn AND
            canApplyAnyRedo = FALSE#) THEN
          deb(DEB_IN, 'offline range spanning a resetlogs');
          -- This action is the last action we will fetch from this
          -- incarnation. There cannot be any other actions from this
          -- incarnation with a lower toSCN. Incremental backups cannot
          -- span resetlogs, and any full backup checkpointed at the end
          -- of the offline range would have been returned from the
          -- cursor already because full backups have a fromSCN of 0.
          -- Get parent's dbinc key
          --
          SELECT parent_dbinc_key INTO parentDbincKey
            FROM dbinc
           WHERE dbinc.dbinc_key = computeRecoveryActions2.dbinc_key;
          IF (parentDbincKey is null) THEN
            -- we don't know our parent
            -- Open a cursor to get all dbinc rows for this database
            -- that could possibly be our parent incarnation. We will
            -- search them all. If exactly one turns out to work,
            -- then we assume it is our parent. If more than one works,
            -- then we cannot decide and will fail. If none works, well,
            -- too bad.
            deb(DEB_OPEN, 'dbinc_cursor');
            OPEN dbinc_cursor(this_db_key, dbinc_rlgscn);
            deb(DEB_IN, 'doing scan of all possible parent incarnations, top='
                || to_char(rcvRecStack.last));
            <<dbinc_loop>>
            LOOP
              FETCH dbinc_cursor INTO dbinc_row;
              EXIT WHEN dbinc_cursor%NOTFOUND;
              deb(DEB_IN, 'starting test search of incarnation key=' ||
                  to_char(dbinc_row.dbinc_key));
              savedrcvRecStackState := rcvRecStackState;
              rcvRecStackState.top := rcvRecStack.last;
              savedBackupAge := thisBackupAge;
              -- Bypass age checks to get a true result. Any actions
              -- stacked in this test search are not accounted for in the
              -- backup age calculation.
              thisBackupAge := rcvRecBackupAge;
              rc := computeRecoveryActions2(fno, crescn, cretime,
                       df_rlgscn, df_rlgtime, df_ckpscn, offlscn, onlscn,
                       onltime, cleanscn, clean2scn, clean2time, allowfuzzy,
                       partial_rcv,
                       rcvRecStack(rcvRecStack.count).fromSCN_act,
                       dbinc_row.dbinc_key, cf_scn, cf_cretime, cf_offrrid,
                       TRUE, done_flag, allCopies, doingRecovery, rmanCmd,
                       foreignDbid, pluggedRonly, pluginSCN, pluginRlgSCN,
                       pluginRlgTime, creation_thread, creation_size);
              -- Trim any actions that may have gotten stacked in
              -- the test search. If rc was FALSE, then none should
              -- have been stacked, but it doesn't hurt to do a
              -- trim(0), so do it anyway.
              deb(DEB_IN, 'last=' || to_char(rcvRecStack.last) ||
                  ' trimming last ' ||
                  to_char(rcvRecStack.last - rcvRecStackState.top) );
              rcvRecStack.trim(rcvRecStack.last - rcvRecStackState.top);
              rcvRecStackState := savedrcvRecStackState;
              deb(DEB_PRINT,'restoring rcvRecStackCount after test search'||
                  rcvRecStack.count);
              thisBackupAge := savedBackupAge;
              deb(DEB_IN, 'count is now '|| to_char(rcvRecStack.count));
              IF (rc) THEN
                -- We found an action with a checkpoint SCN that
                -- matched the offline range start SCN. So we just
                -- searched what was most likely our parent
                -- incarnation. It is possible that the datafile went
                -- offline clean at the same SCN in two different
                -- incarnations. This is very unlikely, but if it
                -- happens, then there is no way for us to decide
                -- which incarnation was the correct one. So save the
                -- dbinc_key, trim back any actions we may have added,
                -- and continue the search. If we exhaust the
                -- dbinc_cursor and find only 1 incarnation that
                -- worked, we can assume it was the right one.
                -- If we find more than one, then we have to give up.
                IF (parentDbincKey is null) THEN
                  parentDbincKey := dbinc_row.dbinc_key;
                ELSE
                  -- We've found a second incarnation that could be
                  -- our parent. Since we cannot distinguish between
                  -- them, we are done.
                  deb(DEB_IN, 'aborting search due to ambiguous ancestry');
                  CLOSE dbinc_cursor;
                  EXIT action_loop;
                END IF;
              END IF;
            END LOOP;   -- dbinc_loop
          END IF;   -- if we don't know our parent
          -- If we know our parent from the catalog or just found our
          -- parent in the search above, then search our parent.
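          -- Note on the test searches above: each candidate parent is
          -- probed with test_search = TRUE, and anything it stacked is
          -- trimmed away before the next candidate is tried, so only the
          -- real search below (test_search = FALSE) leaves actions on
          -- the stack.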
          IF (parentDbincKey is not null) THEN
            deb(DEB_IN, 'starting search of parent incarnation key='||
                to_char(parentDbincKey));
            rc := computeRecoveryActions2(fno, crescn, cretime,
                     df_rlgscn, df_rlgtime, df_ckpscn, offlscn, onlscn,
                     onltime, cleanscn, clean2scn, clean2time, allowfuzzy,
                     partial_rcv,
                     rcvRecStack(rcvRecStack.last).fromSCN_act,
                     parentDbincKey, cf_scn, cf_cretime, cf_offrrid,
                     FALSE, done_flag, allCopies, doingRecovery, rmanCmd,
                     foreignDbid, pluggedRonly, pluginSCN, pluginRlgSCN,
                     pluginRlgTime, creation_thread, creation_size);
            IF (done_flag) THEN
              deb(DEB_PRINT,'done set to true - 4');
              done := TRUE;
            END IF;
            IF (action.type_act = spanningRange_act_t) THEN
              -- We went recursive because of an offline range which
              -- spanned multiple resetlogs. In this case, we
              -- propagate the return code from the recursive search.
              -- The incarnation we are currently searching is an
              -- ancestor if the incarnation we just searched
              -- recursively is an ancestor.
              isAncestor := rc;
            END IF;
          END IF;   -- we know or found our parent
          EXIT action_loop;
        END IF;   -- offline range start SCN < dbinc_rlgscn
      ELSIF (addAction_rc = action_FAIL) THEN
        -- FAIL
        -- Cannot apply this action for some reason. That's OK,
        -- there may be other actions that we can apply.
        NULL;
      ELSIF (addAction_rc = action_SKIP) THEN
        -- SKIP
        -- We don't want to add this action to the stack.
        NULL;
      ELSIF (addAction_rc = action_OLD_REDO) THEN
        -- OLD_REDO
        -- If OLD_REDO, then the to_scn of this action does not reach the
        -- from_scn of the last stacked action, and we are searching an
        -- ancestral incarnation where we cannot apply redo. We know we
        -- will not see any actions with a higher to_scn, so this stops
        -- our search. No other actions from this incarnation are usable
        -- because we cannot reach the spanning offline range from them.
        EXIT action_loop;
      ELSE
        -- unknown return code
        deb(DEB_EXIT, 'with error 20999');
        raise_application_error(-20999, 'unknown add action return code');
      END IF;
    ELSE
      -- rcvRecCursor exhausted
      deb(DEB_IN, 'end of cursor reached');
      EXIT action_loop;
    END IF;   -- if fetchRecoveryAction
  END LOOP;
  IF (done) THEN
    deb(DEB_EXIT,'computeRecoveryActions2 - 3');
    RETURN isAncestor;
  END IF;
  -- There is nothing else in this incarnation that we can
  -- restore or apply. If we are doing RECOVER (doingRecovery is TRUE),
  -- then we can just apply redo to this datafile.
  IF (doingRecovery) THEN
    -- if doing RECOVER
    deb(DEB_PRINT,'computeRecoveryActions2:recovery final check');
    deb(DEB_IN, 'crescn=' || crescn ||';this_reset_scn='||this_reset_scn);
    deb(DEB_IN, 'df_rlgscn='||df_rlgscn||';df_rlgtime='||
        to_char(df_rlgtime));
    IF (rmanCmd = rcvCopyCmd_t) THEN
      -- We couldn't find any copies that could be recovered. This is
      -- because no incrementals are available, no copies are available,
      -- or none of the existing incrementals can be applied over copies.
      deb(DEB_PRINT,'computeRecoveryActions2: no copies stacked');
      resetrcvRecStack;
      done := FALSE;
    ELSIF (rcvRecStack.count > 0) THEN
      -- if found some action
      deb(DEB_PRINT,'computeRecoveryActions2:'|| rcvRecStack.count ||
          ' actions stacked');
      -- Add a redo action to go from the datafile's checkpoint up to
      -- the from_scn of the last action we stacked. If partial_rcv is
      -- FALSE, then addRedo() will trim the actions we stacked because we
      -- cannot use them.
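      -- Illustrative example (hypothetical SCNs): if the datafile is
      -- checkpointed at SCN 1000 and the oldest stacked action has
      -- from_scn = 1500, the addRedo() call below stacks a redo action
      -- covering SCNs 1000-1500; with partial_rcv = FALSE the stacked
      -- actions are trimmed instead, because they cannot be used.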
      rcvRecTop(lastAction);
      stack_df_rlgscn := df_rlgscn;
      IF (df_rlgscn is NULL) THEN
        stack_df_rlgscn := this_reset_scn;
      END IF;
      addRedo_rc := addRedo(isAncestor, df_ckpscn, stack_df_rlgscn,
                            lastAction, partial_rcv, doingRecovery);
      IF (addRedo_rc = action_OK OR addRedo_rc = action_FAIL OR
          addRedo_rc = action_OLD_INC_REDO) THEN
        -- OK means we could do the partial recovery from the datafile
        -- checkpoint up to the first action we found. FAIL means
        -- we could not add the redo, but we don't really need to apply
        -- any. OLD_INC_REDO means we need to recover through an old
        -- incarnation. In each of these cases, the file can be
        -- successfully recovered.
        --
        done := TRUE;
      END IF;
      IF (addRedo_rc = action_OLD_INC_REDO) THEN
        -- clear stack, we need to apply redo from checkpoint
        rcvRecStack.trim(rcvRecStack.count);
      END IF;
    ELSE
      deb(DEB_PRINT,'computeRecoveryActions2: no actions stacked');
      -- We didn't stack any actions. So this datafile is recoverable only
      -- if it is from the current incarnation. Set the done flag
      -- accordingly.
      IF (df_rlgscn = this_reset_scn AND df_rlgtime = this_reset_time) THEN
        done := TRUE;
      ELSIF (action_OLD_INC_REDO = addRedo(isAncestor, df_ckpscn, df_rlgscn,
                                           null, partial_rcv,
                                           doingRecovery)) THEN
        done := TRUE;
      END IF;
      -- We can create the datafile if it is known to the incarnation
      -- table. We need to check the creation size and plugin dbid so as
      -- not to allow the create datafile.
      IF (df_rlgscn is NULL AND        -- df may be created during rcv
          cretime > cf_cretime AND     -- df created after cf creation
          crescn > nvl(inc_list(max_inc_idx-1).prior_resetlogs_change#,
                       inc_list(max_inc_idx-1).resetlogs_change#)) THEN
        -- known to incarnation table
        done := TRUE;
      END IF;
    END IF;
  ELSE
    -- not doing a RECOVER
    -- We may decide to trim the stack here. Never trim beyond the top
    -- of the stack. The top is normally 0, but it is non-zero when
    -- we are making a test search to find our parent incarnation.
    IF (rcvRecStackState.savePoint = 0) THEN
      -- We did not find any fullKind actions that we could apply.
      -- This means we cannot restore this file or there is nothing
      -- interesting to list. Note that we may have some actions
      -- stacked at this point, but they are not useful.
      -- Here is where we could do a CREATE DATAFILE.
      deb(DEB_IN, 'could try create datafile');
      rcvRecStack.trim(rcvRecStack.count - rcvRecStackState.top);
      rcvRecStackState.lowAction := rcvRecStack.count;
      deb(DEB_IN, 'rcvRecStackState :' || rcvRecStackState.top || ' ' ||
          rcvRecStackState.savePoint || ' ' ||
          rcvRecStackState.lowAction);
      -- purge all actions
      -- Add an action to create the datafile.
      -- We need to check the creation size and plugin dbid so as not to
      -- allow the create datafile. For now the server signals an error on
      -- attempting the create datafile command ...
      -- We also compare creation_thread/creation_size to NULL so that old
      -- RMAN behavior is not changed until these values are resynced.
      IF ((computeRA_available = FALSE) AND     -- no backup found
          foreignDbid = 0 AND                   -- not a foreign file
          (creation_thread IS NULL or creation_thread > 0) AND
                                                -- creation thread is known
          (creation_size IS NULL or creation_size > 0) AND
                                                -- creation size is known
          (cretime > cf_cretime) AND            -- df created after cf
          (crescn > nvl(inc_list(max_inc_idx-1).prior_resetlogs_change#,
                        inc_list(max_inc_idx-1).resetlogs_change#)) AND
                                                -- known to inc table
          (bitand(createdatafile_act_t, getRA_actionMask) != 0) AND
                                                -- user asked for the action
          restoreTag is null) THEN              -- from tag not specified
        action := null_action;
        action.type_con := datafile_con_t;
        action.type_act := createdatafile_act_t;
        action.dfNumber_obj := fno;
        action.dfCreationSCN_obj := crescn;
        action.fromSCN_act := 0;
        action.toSCN_act := crescn;
        action.toTime_act := cretime;
        action.pluggedRonly_obj := 0;
        action.pluginSCN_obj := 0;
        FOR inc_idx in 0..max_inc_idx-1 LOOP
          IF (crescn > inc_list(inc_idx).resetlogs_change#) THEN
            deb(DEB_PRINT, 'data file created with resetlogs scn='||
                inc_list(inc_idx).resetlogs_change#);
            action.rlgSCN_act := inc_list(inc_idx).resetlogs_change#;
            action.rlgTime_act := inc_list(inc_idx).resetlogs_time;
            exit;
          END IF;
        END LOOP;
        IF (action.rlgSCN_act IS NULL) THEN
          deb(DEB_IN, 'rlgSCN is null');
          addAction_rc := action_FAIL;
        ELSE
          addAction_rc := addAction(actionIN => action,
                                    partial_rcv => partial_rcv,
                                    isAncestor => isAncestor,
                                    allCopies => allCopies,
                                    doingRecovery => doingRecovery,
                                    rmanCmd => rmanCmd);
        END IF;
        IF (addAction_rc = action_OK) THEN
          -- the action was added
          done := TRUE;
          deb(DEB_IN, 'added create datafile action for '||fno);
        ELSE
          deb(DEB_IN, 'failed to add create datafile action for '||fno);
        END IF;
      END IF;
      IF computeRA_available = TRUE THEN
        deb(DEB_IN, 'need different device type channels to restore');
      END IF;
    ELSE
      -- We found at least 1 fullKind action, so the file is
      -- definitely restorable or there is something interesting to LIST.
      -- No actions stacked after the savePoint are usable though, because
      -- there is no fullKind backup to which we could apply these
      -- actions. So trim all actions up to the savePoint.
      deb(DEB_IN, 'trim all actions after savePoint='||
          to_char(greatest(rcvRecStackState.savePoint,
                           rcvRecStackState.top)));
      rcvRecStack.trim(rcvRecStack.last -
                       greatest(rcvRecStackState.savePoint,
                                rcvRecStackState.top));
      done := TRUE;
    END IF;
    deb(DEB_PRINT,'computeRecoveryActions2: set rcvRecStackCount='||
        rcvRecStack.count);
  END IF;   -- if doing recover
  IF (done) THEN
    deb(DEB_IN, 'done is TRUE');
  ELSE
    deb(DEB_IN, 'done is FALSE');
  END IF;
  deb(DEB_EXIT,'computeRecoveryActions2 - 4');
  RETURN isAncestor;
END computeRecoveryActions2;

----------------------------- getParentIncarnation ----------------------------

FUNCTION getParentIncarnation(
  dbinc_key         OUT number
 ,resetlogs_change# OUT number)
RETURN number IS
BEGIN
  deb(DEB_ENTER, 'getParentIncarnation');
  SELECT resetlogs_change#, parent_dbinc_key
    INTO resetlogs_change#, dbinc_key
    FROM rc_database_incarnation
   where dbinc_key = getParentIncarnationKey;
  deb(DEB_EXIT, 'with: TRUE#');
  RETURN TRUE#;
EXCEPTION
  WHEN no_data_found THEN
    deb(DEB_EXIT, 'with: FALSE#');
    RETURN FALSE#;
END getParentIncarnation;

----------------------------- getPointInTimeInc -------------------------------

-- Return the reset_scn to which the given scn belongs. If not found,
-- return null.
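-- Illustrative example (hypothetical values): with incarnations reset at
-- SCN 100 and SCN 1000, and this_reset_scn = 1000, getPointInTimeInc(500)
-- returns 100, getPointInTimeInc(1500) returns 1000, and an SCN below the
-- oldest known reset_scn finds no row and so returns NULL.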
FUNCTION getPointInTimeInc(
  toscn IN number)
RETURN NUMBER IS
  pitrlgscn number;
BEGIN
  IF (getPointInTimeInc.toscn >= this_reset_scn) THEN
    RETURN this_reset_scn;
  END IF;
  SELECT dbinc.reset_scn INTO pitrlgscn
    FROM (SELECT reset_scn, PRIOR reset_scn next_reset_scn
            FROM dbinc
           START WITH dbinc_key = this_dbinc_key
         CONNECT BY PRIOR parent_dbinc_key = dbinc_key) dbinc
   WHERE dbinc.reset_scn <= getPointInTimeInc.toscn
     AND dbinc.next_reset_scn > getPointInTimeInc.toscn;
  RETURN pitrlgscn;
EXCEPTION
  WHEN no_data_found THEN
    RETURN NULL;
END getPointInTimeInc;

--------------------------------- get_cfUntilScn ------------------------------

-- Recompute the set until SCN when a restore point is used and a suitable
-- long-term CF backup exists. With this, a user can specify a restore point
-- and restore the CF that was built as part of the long-term backup.

FUNCTION get_cfUntilScn RETURN number IS
  ret_scn number := untilSCN;
  max_scn number;
  max_tag bp.tag%type;
  -- Get the high scn for archivelog backups or checkpoint scn for df backups
  CURSOR keepscn(scn NUMBER) IS
    SELECT bckscn, tag FROM
      (SELECT brl.next_scn bckscn, bp.tag
         FROM bs, brl, bp
        WHERE bs.bs_key = brl.bs_key
          AND bs.bs_key = bp.bs_key
          AND bs.keep_options > 0
          AND bp.status = 'A'
          AND brl.low_scn <= scn
          AND brl.next_scn > scn
          AND this_site_key = bs.site_key
          AND this_dbinc_key = dbinc_key
       UNION
       SELECT xal.next_scn bckscn, xal.tag
         FROM xal
        WHERE xal.keep_options > 0
          AND xal.status = 'A'
          AND xal.low_scn <= scn
          AND xal.next_scn > scn
          AND this_site_key = xal.site_key
          AND this_dbinc_key = dbinc_key
       UNION
       SELECT bdf.ckp_scn bckscn, bp.tag
         FROM bs, bdf, bp
        WHERE bs.bs_key = bdf.bs_key
          AND bs.bs_key = bp.bs_key
          AND bs.keep_options > 0
          AND bp.status = 'A'
          AND bdf.ckp_scn = scn+1
          AND this_site_key = bs.site_key
          AND this_dbinc_key = dbinc_key
       UNION
       SELECT xdf.ckp_scn bckscn, xdf.tag
         FROM xdf
        WHERE xdf.keep_options > 0
          AND xdf.status = 'A'
          AND xdf.ckp_scn = scn+1
          AND this_site_key = xdf.site_key
          AND this_dbinc_key = dbinc_key)
    ORDER BY bckscn DESC;
BEGIN
  IF rpoint_set THEN
    -- Find a long-term backup matching the given scn
    OPEN keepscn(untilSCN - 1);
    FETCH keepscn INTO max_scn, max_tag;
    CLOSE keepscn;
    -- Get the closest cf following that backup, with that tag.
    SELECT NVL(MIN(cfscn)+1, untilSCN) INTO ret_scn FROM
      (SELECT bcf.ckp_scn cfscn
         FROM bcf, bs, bp
        WHERE bcf.bs_key = bs.bs_key
          AND bs.bs_key = bp.bs_key
          AND bp.status = 'A'
          AND this_site_key = bs.site_key
          AND this_dbinc_key = dbinc_key
          AND bp.tag = max_tag
          AND bcf.ckp_scn > max_scn
       UNION ALL
       SELECT ckp_scn FROM ccf
        WHERE this_site_key = site_key
          AND this_dbinc_key = dbinc_key
          AND status = 'A'
          AND tag = max_tag
          AND ckp_scn > max_scn
       UNION ALL
       SELECT ckp_scn FROM xcf
        WHERE this_site_key = site_key
          AND this_dbinc_key = dbinc_key
          AND status = 'A'
          AND tag = max_tag
          AND ckp_scn > max_scn);
    deb(DEB_PRINT, 'new scn is ' || ret_scn);
  END IF;
  RETURN ret_scn;
END get_cfUntilScn;

----------------------------- IsDuplicateAlName -------------------------------

-- Return TRUE if the same name was already returned for a given log sequence

FUNCTION IsDuplicateAlName(samelog IN number, filename varchar2)
RETURN BOOLEAN IS
  duplicate number;
BEGIN
  -- The catalog may have duplicate names for the same log, one with the
  -- standby flag and the other with primary. We should not return the
  -- duplicate names, as doing so will cause the controlfile to mark the
  -- previous row with the same name as deleted when processing the
  -- translated archivelog list.
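  -- For example (hypothetical names): if sequence 42 was already
  -- translated to '/arch/1_42_666.arc' from the primary record, a standby
  -- record carrying the same file name for the same sequence is filtered
  -- out here rather than returned a second time.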
duplicate := FALSE#; IF (samelog = TRUE#) THEN FOR log_idx in 0..max_lognames_idx-1 LOOP IF lognames_list(log_idx) = filename THEN duplicate := TRUE#; EXIT; END IF; END LOOP; lognames_list(max_lognames_idx) := filename; max_lognames_idx := max_lognames_idx + 1; ELSE lognames_list(0) := filename; max_lognames_idx := 1; END IF; IF duplicate = TRUE# THEN deb(DEB_PRINT, 'Filter duplicate log name' || filename); RETURN TRUE; ELSE RETURN FALSE; END IF; END IsDuplicateAlName; ----------------------------------------------- -- *** PUBLIC FUNCTION/PROCEDURE SECTION *** -- ----------------------------------------------- ----------------------------- -- Get Current Incarnation -- ----------------------------- PROCEDURE getCurrentIncarnation( db_id IN number ,reset_scn OUT number ,reset_time OUT date) IS BEGIN deb(DEB_ENTER, ' getCurrentIncarnation'); deb(DEB_IN, ' db_id=' || to_char(db_id)); SELECT dbinc.reset_scn, dbinc.reset_time INTO reset_scn, reset_time FROM db, dbinc WHERE db_id = getCurrentIncarnation.db_id -- should return 1 row AND dbinc.dbinc_key = db.curr_dbinc_key; deb(DEB_EXIT, 'reset_scn='||reset_scn||' reset_time='||reset_time); EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with error 20001'); raise_application_error(-20001, 'Database not found'); END; --------------------------------------------- -- Get Primary DF Name (from catalog only) -- --------------------------------------------- FUNCTION getPrimaryDfName( fno IN NUMBER) RETURN VARCHAR2 IS local_fname VARCHAR2(513); BEGIN SELECT fname INTO local_fname FROM site_dfatt s, df, node WHERE node.database_role = 'PRIMARY' AND node.db_key = this_db_key AND node.site_key = s.site_key AND s.df_key = df.df_key AND df.dbinc_key = node.dbinc_key AND df.file# = fno; RETURN local_fname; END getPrimaryDfName; ------------------------------ -- Set Database Incarnation -- ------------------------------ --------------------------------- setDatabase --------------------------------- PROCEDURE setDatabase( db_name IN varchar2 ,reset_scn IN number ,reset_time IN date ,db_id IN number ,db_unique_name IN varchar2 default NULL ,site_aware IN boolean default FALSE ,dummy_instance IN boolean default FALSE) IS local dbinc%rowtype; -- local variables dbnm dbinc.db_name%TYPE; dbnm_in dbinc.db_name%TYPE; current_inc varchar2(3); rid varchar2(18); CURSOR inc_record_c IS SELECT * FROM rc_database_incarnation where db_key = this_db_key and status <> 'ORPHAN' order by resetlogs_change# desc; inc_rec rc_database_incarnation%ROWTYPE; inc_idx binary_integer; inc_num number; ever_resynced number; BEGIN deb(DEB_ENTER, 'setDatabase'); this_db_key := NULL; -- clear in case exception raised this_dbinc_key := NULL; this_reset_scn := NULL; this_reset_time := NULL; this_db_unique_name := NULL; translation_site_key := NULL; this_site_key := NULL; dbnm_in := upper(db_name); -- verify that this package is compatible with the recovery catalog BEGIN SELECT null INTO local.db_key FROM rcver WHERE version = catalogVersion; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with error 20299'); raise_application_error(-20299, 'Recovery catalog version mismatch'); END; -- If the target database is mounted, then we have the db_id (kccfhdbi). -- This can be used to find the row in the db table corresponding -- to the target database, and it will indicate which incarnation -- is currently considered the current one. 
  IF (db_id is not NULL) THEN
     deb(DEB_IN, ' db_id=' || to_char(db_id));
     BEGIN
       SELECT db.db_key, curr_dbinc_key, dbinc.reset_scn, dbinc.reset_time,
              dbinc.db_name
       INTO local.db_key, local.dbinc_key, local.reset_scn, local.reset_time,
            local.db_name
       FROM db, dbinc
       WHERE db_id = setDatabase.db_id             -- should return 1 row
         AND dbinc.dbinc_key = db.curr_dbinc_key;
     EXCEPTION
       WHEN no_data_found THEN
          deb(DEB_EXIT, 'with error 20001');
          raise_application_error(-20001, 'Database not found');
     END;

     -- Validate SCN only if the target database is indeed mounted
     IF (dbnm_in is NOT NULL) THEN
        -- Now validate that the resetlogs SCN we were passed matches that
        -- of the current incarnation of this database. If not, then
        -- a reset database should be done, or the wrong controlfile is
        -- mounted.
        BEGIN
          SELECT decode(dbinc.dbinc_key, db.curr_dbinc_key, 'YES', 'NO'),
                 dbinc.db_name, dbinc.rowid
          INTO current_inc, dbnm, rid
          FROM db, dbinc
          WHERE db.db_key = dbinc.db_key
            AND db.db_id = setDatabase.db_id
            AND dbinc.reset_scn = setDatabase.reset_scn
            AND dbinc.reset_time = setDatabase.reset_time;
        EXCEPTION
          WHEN no_data_found THEN
             deb(DEB_EXIT, 'with error 20003');
             raise_application_error(-20003, 'Database incarnation not found');
        END;
        IF (current_inc = 'NO') THEN
           deb(DEB_EXIT, 'with error 20011');
           raise_application_error(-20011, 'Database incarnation not current');
        END IF;
        IF (dbnm != dbnm_in) THEN
           deb(DEB_PRINT, 'DB_NAME changed from '||dbnm||' to '|| dbnm_in);
           UPDATE dbinc SET dbinc.db_name = dbnm_in WHERE rowid = rid;
           COMMIT;
        END IF;
     END IF;
  ELSIF (dbnm_in is NOT NULL) THEN
     -- If db_id is unknown, try using db_name
     deb(DEB_IN, 'db_id is null');
     BEGIN
       SELECT db.db_key, db.curr_dbinc_key, dbinc.reset_scn, dbinc.reset_time
       INTO local.db_key, local.dbinc_key, local.reset_scn, local.reset_time
       FROM db, dbinc
       WHERE db.curr_dbinc_key = dbinc.dbinc_key
         AND dbinc.db_name = dbnm_in;
     EXCEPTION
       WHEN no_data_found THEN
          deb(DEB_EXIT, 'with error 20001');
          raise_application_error(-20001, 'Database not found');
       WHEN too_many_rows THEN
          deb(DEB_EXIT, 'with error 20005');
          raise_application_error(-20005, 'Database name is ambiguous');
     END;
  ELSE
     deb(DEB_EXIT, 'with error 20006');
     raise_application_error(-20006, 'Database name is missing');
  END IF;

  -- If db_unique_name is NULL, then we know this is a pre-10i database.
  IF dummy_instance THEN
     deb(DEB_PRINT, 'dummy_instance is TRUE');
     this_dummy_instance := TRUE;
  END IF;
  this_db_unique_name := upper(db_unique_name);
  this_db_key := local.db_key;
  this_dbinc_key := local.dbinc_key;
  this_reset_scn := local.reset_scn;
  this_reset_time := local.reset_time;
  deb(DEB_PRINT, 'this_db_unique_name= ' || this_db_unique_name);
  deb(DEB_PRINT, 'this_site_key= ' || this_site_key);
  deb(DEB_PRINT, 'this_db_key='||this_db_key);
  deb(DEB_PRINT, 'this_dbinc_key='||this_dbinc_key);

  -- open cursor
  deb(DEB_PRINT, 'Fetching incarnation records');
  inc_idx := 0;
  FOR inc_rec IN inc_record_c LOOP
     inc_list(inc_idx) := inc_rec;
     deb(DEB_PRINT, 'incarnation record id=' || inc_idx);
     deb(DEB_PRINT, 'icprs=' || inc_list(inc_idx).prior_resetlogs_change# ||
                    ',icprc='|| inc_list(inc_idx).prior_resetlogs_time);
     deb(DEB_PRINT, 'icrls=' || inc_list(inc_idx).resetlogs_change# ||
                    ',icrlc='|| inc_list(inc_idx).resetlogs_time);
     deb(DEB_PRINT, 'icpinc=' || inc_list(inc_idx).parent_dbinc_key);
     deb(DEB_PRINT, 'icflg=' || inc_list(inc_idx).status);
     inc_idx := inc_idx + 1;
  END LOOP;
  max_inc_idx := inc_idx;
  deb(DEB_PRINT, 'number of incarnation=' || max_inc_idx);

  BEGIN
    SELECT site_key INTO this_site_key FROM node
    WHERE db_unique_name = upper(this_db_unique_name)
      AND db_key = this_db_key;
    deb(DEB_PRINT, 'this_site_key=' || this_site_key);
  EXCEPTION
    WHEN no_data_found THEN
       deb(DEB_PRINT, 'this_site_key is NULL');
  END;

  BEGIN
    IF site_aware THEN
       client_site_aware := 1;
       deb(DEB_PRINT, 'client_site_aware=' || client_site_aware);
    END IF;
    IF site_aware AND this_site_key is not NULL THEN
       translation_site_key := this_site_key;
       deb(DEB_PRINT, 'translation_site_key=' || translation_site_key);
    ELSE
       BEGIN
         SELECT site_key INTO translation_site_key FROM node
         WHERE database_role = 'PRIMARY' AND db_key = this_db_key;
         deb(DEB_PRINT, 'translation_site_key(primary)=' ||
             translation_site_key);
       EXCEPTION
         WHEN no_data_found THEN
            -- If RMAN is not site aware, or this is a dummy instance, and
            -- we did not find a primary, then there has been a role
            -- transition from primary to standby and we have not yet seen
            -- the new primary. In such cases, use the old primary's site
            -- key, which most likely is the same as the current site. If
            -- the current site is not the last primary, or was never
            -- resynced, then we are left with the option of picking some
            -- site that was resynced, so that the database structure
            -- information can be extracted by the translation cursors.
            select count(*) into ever_resynced from rc_datafile
            where site_key = this_site_key;
            IF ever_resynced > 0 THEN
               translation_site_key := this_site_key;
            ELSE
               select max(site_key) into translation_site_key from node
               where db_key = this_db_key;
            END IF;
            deb(DEB_PRINT, 'translation_site_key(no_data_found)=' ||
                translation_site_key);
         WHEN too_many_rows THEN
            -- bug 7138218
            -- during upgrade, we may have multiple primary databases for same
            -- db_key, hence tolerate having multiple primary dbs
            -- and they will be fixed by DBMS_RCVCAT.setDatabase call.
            select max(site_key) into translation_site_key from node
            where db_key = this_db_key and database_role = 'PRIMARY';
            deb(DEB_PRINT, 'translation_site_key(too_many_rows)=' ||
                translation_site_key);
       END;
    END IF;
  EXCEPTION
    WHEN no_data_found THEN
       deb(DEB_PRINT, 'translation_site_key is NULL');
  END;

  IF site_aware AND this_site_key is NULL THEN
     this_site_key := translation_site_key;
     deb(DEB_PRINT, 'this_site_key is set to same as translation_site_key');
  END IF;
  deb(DEB_EXIT);
END setDatabase;

----------------------------- getDbUniqueName ---------------------------------

FUNCTION getDbUniqueName(
  db_id IN number) RETURN varchar2 IS
  dbunqnm node.db_unique_name%TYPE;
  CURSOR dbunqnm_c IS
    SELECT node.db_unique_name FROM node, db
    WHERE db.db_id = getDbUniqueName.db_id
      AND db.db_key = node.db_key;
BEGIN
  SELECT node.db_unique_name INTO dbunqnm
  FROM node, db
  WHERE db.db_id = getDbUniqueName.db_id
    AND db.db_key = node.db_key;
  RETURN dbunqnm;
EXCEPTION
  WHEN no_data_found THEN
     RETURN NULL;
END getDbUniqueName;

--------------------------------- setDbincKey ---------------------------------

PROCEDURE setDbincKey(
  key IN number) IS
BEGIN
  deb(DEB_ENTER, 'setDbincKey');
  IF (key is not null) THEN
     this_dbinc_key := key;
  ELSE
     -- if this query gets more than 1 row, then you lose. try again.
     SELECT curr_dbinc_key INTO this_dbinc_key FROM db;
  END IF;
  SELECT db_key, reset_scn, reset_time
  INTO this_db_key, this_reset_scn, this_reset_time
  FROM dbinc
  WHERE dbinc_key = this_dbinc_key;
  deb(DEB_EXIT);
END setDbincKey;

----------------------------- getParentIncarnation ----------------------------

FUNCTION getParentIncarnation(
  resetlogs_change# IN OUT number
 ,resetlogs_time    IN OUT date) RETURN number IS
BEGIN
  deb(DEB_ENTER, 'getParentIncarnation');
  -- If input is null, this is the first call, and we return the current
  -- incarnation's key.
  IF (resetlogs_change# is null) THEN
     getParentIncarnationKey := this_dbinc_key;
  END IF;
  SELECT resetlogs_change#, resetlogs_time, parent_dbinc_key
  INTO resetlogs_change#, resetlogs_time, getParentIncarnationKey
  FROM rc_database_incarnation
  WHERE dbinc_key = getParentIncarnationKey;
  deb(DEB_EXIT, 'with: TRUE#');
  RETURN TRUE#;
EXCEPTION
  WHEN no_data_found THEN
     deb(DEB_EXIT, 'with: FALSE#');
     RETURN FALSE#;
END getParentIncarnation;

-------------------------------- getCheckpoint --------------------------------

PROCEDURE getCheckpoint(
  scn       OUT number
 ,seq       OUT number
 ,ckp_key_1 OUT number
 ,ckp_key_2 OUT number) IS
  full_row   ckp%ROWTYPE;
  either_row ckp%ROWTYPE;
BEGIN
  deb(DEB_ENTER, 'getCheckpoint');
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  -- Return the SCN and controlfile sequence from the ckp table row
  -- with the greatest ckp_scn as the most recent FULL checkpoint.
  -- The first query finds the FULL ckp with the greatest ckp_scn.
  -- If there are 2 FULL checkpoints with the same ckp_scn, we choose the
  -- one with the highest cf_seq or the highest cf_create_time.
  -- The second query finds the row of either ckp_type with the highest
  -- ckp_scn for the cf_create_time obtained in the
  -- previous query. If there are 2 rows with the same ckp_scn for a
  -- given recid and create_time, then we choose the one with the highest
  -- cf_seq. If the cf_seq is the same, then the ckp_type columns *must*
  -- differ because of the ckp_u1 constraint. In this unlikely case, we
  -- choose the 'PARTIAL' one.
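  -- Illustrative example (hypothetical rows): with FULL checkpoints at
  -- ckp_scn 900 and 950, the first query picks the row at 950 and fixes
  -- its cf_create_time; if a PARTIAL row for that same controlfile exists
  -- at ckp_scn 970, the second query returns it, so scn is set to 970
  -- while ckp_key_1 still names the FULL row at 950.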
  FOR r IN (SELECT /*+ first_rows */ * FROM ckp
            WHERE dbinc_key = this_dbinc_key
              AND ckp_type = 'FULL'
              AND site_key = this_site_key
            ORDER BY ckp_scn DESC, cf_create_time DESC, ckp_cf_seq DESC) LOOP
     full_row := r;
     EXIT;
  END LOOP;
  FOR r IN (SELECT /*+ first_rows */ * FROM ckp
            WHERE dbinc_key = this_dbinc_key
              AND cf_create_time = full_row.cf_create_time
              AND site_key = this_site_key
            ORDER BY ckp_scn DESC, ckp_cf_seq DESC, ckp_type DESC) LOOP
     either_row := r;
     EXIT;
  END LOOP;
  IF either_row.ckp_key IS NOT NULL THEN
     scn := either_row.ckp_scn;
     seq := either_row.ckp_cf_seq;
     ckp_key_1 := full_row.ckp_key;
     ckp_key_2 := either_row.ckp_key;
  ELSE
     scn := 0;
     seq := 0;
     ckp_key_1 := 0;
     ckp_key_2 := 0;
  END IF;
  deb(DEB_EXIT);
END getCheckpoint;

PROCEDURE getCheckpoint(
  scn OUT number
 ,seq OUT number) IS
  ckp_key_1 number;
  ckp_key_2 number;
BEGIN
  getCheckpoint(scn, seq, ckp_key_1, ckp_key_2);
END getCheckpoint;

-------------------
-- Query Filters --
-------------------

--------------------------------- setUntilTime --------------------------------

PROCEDURE setUntilTime(
  unttime IN date) IS
  walk_dbinc_key   number := NULL;
  parent_dbinc_key number := NULL;
BEGIN
  deb(DEB_ENTER, 'setUntilTime');
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  walk_dbinc_key := this_dbinc_key;
<<parent_inc>>
  untilSCN := NULL;
  untilTime := unttime;
  rpoint_set := FALSE;
  BEGIN
    SELECT resetlogs_change# INTO untilSCN
    FROM rc_database_incarnation
    WHERE dbinc_key = walk_dbinc_key
      AND resetlogs_time < untilTime;
  EXCEPTION
    WHEN no_data_found THEN
    BEGIN
      IF (allIncarnations = TRUE#) THEN
         SELECT parent_dbinc_key INTO parent_dbinc_key FROM dbinc
         WHERE dbinc.dbinc_key = walk_dbinc_key;
         walk_dbinc_key := parent_dbinc_key;
         IF (walk_dbinc_key IS NULL) THEN
            deb(DEB_IN, 'parent_dbinc_key=NULL -> exiting');
            untilSCN := 0;                    -- beginning of world
         ELSE
            deb(DEB_IN, 'parent_dbinc_key=' || to_char(parent_dbinc_key));
            GOTO parent_inc;                  -- get scn of parent incarnation
         END IF;
      ELSE
         deb(DEB_EXIT, 'with error 20207');
         raise_application_error(-20207,
            'until time is before resetlogs time');
      END IF;
    END;
  END;
  IF walk_dbinc_key != this_dbinc_key THEN
     actual_dbinc_key := walk_dbinc_key;
     deb(DEB_IN, 'actual_dbinc_key set to: '||to_char(actual_dbinc_key));
  END IF;

  -- find the highest SCN that is associated with a timestamp less than or
  -- equal to the untilTime. This will give us an approximation of the SCN
  -- where point-in-time recovery will stop, and we use it to limit the
  -- candidates for restore to backups and copies that are not fuzzy at the
  -- SCN.
  deb(DEB_IN, 'calling computeUntilSCN. untilSCN= '||to_char(untilSCN));
  deb(DEB_IN, 'calling computeUntilSCN. untilTime= '||to_char(untilTime));
  computeUntilSCN(untilTime, untilSCN, allIncarnations);
  deb(DEB_IN, 'untilSCN= '||to_char(untilSCN));
  deb(DEB_EXIT, 'untilTime='||to_char(untilTime));
EXCEPTION
  WHEN no_data_found THEN
     deb(DEB_EXIT, 'with error 20207');
     raise_application_error(-20207, 'until time is before resetlogs time');
END setUntilTime;

--------------------------------- setUntilScn ---------------------------------

PROCEDURE setUntilScn(
  scn     IN number
 ,rlgscn  IN number  DEFAULT NULL
 ,rlgtime IN date    DEFAULT NULL
 ,flbrp   IN boolean DEFAULT FALSE
 ,rpoint  IN boolean DEFAULT FALSE) IS
  walk_dbinc_key   number := NULL;
  walk_dbinc_scn   number := NULL;
  walk_dbinc_time  date   := NULL;
  parent_dbinc_key number := NULL;
BEGIN
  deb(DEB_ENTER, 'setUntilSCN');
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  IF (flbrp AND rlgscn IS NOT NULL AND rlgtime IS NOT NULL) THEN
     -- flashback to restore point
     BEGIN
       SELECT dbinc_key INTO walk_dbinc_key
       FROM rc_database_incarnation
       WHERE resetlogs_change# = rlgscn
         AND resetlogs_time = rlgtime;
     EXCEPTION
       WHEN no_data_found THEN
          deb(DEB_EXIT, 'with error 20212');
          raise_application_error(-20212,
             'until SCN is an orphan incarnation');
     END;
  ELSE
     walk_dbinc_key := this_dbinc_key;
  END IF;
<<parent_inc>>
  untilSCN := scn;
  untilTime := NULL;
  BEGIN
    SELECT untilSCN, resetlogs_change#, resetlogs_time
    INTO untilSCN, walk_dbinc_scn, walk_dbinc_time
    FROM rc_database_incarnation
    WHERE dbinc_key = walk_dbinc_key
      AND resetlogs_change# < untilSCN;
  EXCEPTION
    WHEN no_data_found THEN
    BEGIN
      IF (allIncarnations = TRUE#) THEN
         SELECT parent_dbinc_key INTO parent_dbinc_key FROM dbinc
         WHERE dbinc.dbinc_key = walk_dbinc_key;
         walk_dbinc_key := parent_dbinc_key;
         IF (walk_dbinc_key IS NULL) THEN
            deb(DEB_EXIT, 'parent_dbinc_key=NULL, with error 20208');
            raise_application_error(-20208,
               'until SCN is before resetlogs SCN');
         ELSE
            deb(DEB_IN, 'parent_dbinc_key=' || to_char(parent_dbinc_key));
            GOTO parent_inc;                  -- get scn of parent incarnation
         END IF;
      ELSE
         deb(DEB_EXIT, 'with error 20208');
         raise_application_error(-20208, 'until SCN is before resetlogs SCN');
      END IF;
    END;
  END;
  IF (rlgscn != walk_dbinc_scn OR rlgtime != walk_dbinc_time) THEN
     deb(DEB_EXIT, 'with error 20212');
     raise_application_error(-20212, 'until SCN is an orphan incarnation');
  END IF;

  -- Remember if a restore point was used in the non-flashback case.
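  -- Illustrative example (hypothetical usage): a SET UNTIL RESTORE POINT
  -- for a KEEP backup reaches here with rpoint=TRUE and flbrp=FALSE, so
  -- rpoint_set becomes TRUE and get_cfUntilScn may later move the
  -- controlfile set-until SCN to the controlfile that was created as part
  -- of that long term backup.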
  rpoint_set := rpoint and not flbrp;
  IF walk_dbinc_key != this_dbinc_key THEN
     actual_dbinc_key := walk_dbinc_key;
     deb(DEB_IN, 'actual_dbinc_key set to: '||to_char(actual_dbinc_key));
  END IF;
  deb(DEB_EXIT);
EXCEPTION
  WHEN no_data_found THEN
     deb(DEB_EXIT, 'with error 20208');
     raise_application_error(-20208,'until SCN is before resetlogs SCN');
END setUntilScn;

--------------------------------- setUntilLog ---------------------------------

PROCEDURE setUntilLog(
  sequence# IN number
 ,thread#   IN number) IS
  walk_dbinc_key   number := NULL;
  parent_dbinc_key number := NULL;
BEGIN
  deb(DEB_ENTER, 'setUntilLog');
  rpoint_set := FALSE;
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  IF (sequence# is NULL) THEN
     deb(DEB_EXIT, 'with error 20205');
     raise_application_error(-20205, 'Incomplete UNTIL clause');
  END IF;
  walk_dbinc_key := this_dbinc_key;
<<parent_inc>>
  untilTime := NULL;
  untilSCN := NULL;
  BEGIN
    -- set untilSCN to the low SCN of the specified log
    SELECT first_change# INTO untilSCN
    FROM rc_log_history
    WHERE dbinc_key = walk_dbinc_key
      AND thread# = nvl(setUntilLog.thread#, 1)    -- default thread# is 1
      AND sequence# = setUntilLog.sequence#;
  EXCEPTION
    WHEN no_data_found THEN
    BEGIN
      -- the specified log is not (yet?) in the recovery catalog.
      -- try setting untilSCN to the next SCN of the previous log
      SELECT next_change# INTO untilSCN
      FROM rc_log_history
      WHERE dbinc_key = this_dbinc_key
        AND thread# = nvl(setUntilLog.thread#, 1)  -- default thread# is 1
        AND sequence# = setUntilLog.sequence# - 1;
    EXCEPTION
      WHEN no_data_found THEN
      BEGIN
        IF (allIncarnations = TRUE#) THEN
           SELECT parent_dbinc_key INTO parent_dbinc_key FROM dbinc
           WHERE dbinc.dbinc_key = walk_dbinc_key;
           walk_dbinc_key := parent_dbinc_key;
           IF (walk_dbinc_key IS NULL) THEN
              deb(DEB_EXIT, 'with error 20206');
              raise_application_error(-20206, 'Specified log does not exist');
           ELSE
              deb(DEB_IN, 'parent_dbinc_key=' || to_char(parent_dbinc_key));
              GOTO parent_inc;                -- get scn of parent incarnation
           END IF;
        ELSE
           deb(DEB_EXIT, 'with error 20206');
           raise_application_error(-20206, 'Specified log does not exist');
        END IF;
      END;
    END;
  END;
  IF walk_dbinc_key != this_dbinc_key THEN
     actual_dbinc_key := walk_dbinc_key;
     deb(DEB_IN, 'actual_dbinc_key set to: '||to_char(actual_dbinc_key));
  END IF;
  deb(DEB_EXIT);
END setUntilLog;

--------------------------------- setToLog ------------------------------------

PROCEDURE setToLog(
  sequence# IN number
 ,thread#   IN number) IS
BEGIN
  deb(DEB_ENTER, 'setToLog');
  untilTime := NULL;
  untilSCN := NULL;
  rpoint_set := FALSE;
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  IF (sequence# is NULL) THEN
     deb(DEB_EXIT, 'with error 20205');
     raise_application_error(-20205, 'Incomplete TO clause');
  END IF;
  BEGIN
    -- set untilSCN to the (next SCN - 1) of the specified log
    SELECT (next_change# - 1) INTO untilSCN
    FROM rc_log_history
    WHERE dbinc_key = this_dbinc_key
      AND thread# = nvl(setToLog.thread#, 1)       -- default thread# is 1
      AND sequence# = setToLog.sequence#;
  EXCEPTION
    WHEN no_data_found THEN
    BEGIN
      -- the specified log is not (yet?) in the recovery catalog.
      -- try setting untilSCN to the (first SCN - 1) of the next log
      SELECT (first_change# - 1) INTO untilSCN
      FROM rc_log_history
      WHERE dbinc_key = this_dbinc_key
        AND thread# = nvl(setToLog.thread#, 1)     -- default thread# is 1
        AND sequence# = setToLog.sequence# + 1;
    EXCEPTION
      WHEN no_data_found THEN
         deb(DEB_EXIT, 'with error 20206');
         raise_application_error(-20206, 'Specified log does not exist');
    END;
  END;
  deb(DEB_EXIT);
END setToLog;

-------------------------- getRedoLogDeletionPolicy ---------------------------

PROCEDURE getRedoLogDeletionPolicy(
  policy OUT varchar2) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getRedoLogDeletionPolicy;

-------------------------- setRedoLogDeletionPolicy ---------------------------

PROCEDURE setRedoLogDeletionPolicy(
  policy  IN varchar2
 ,alldest IN number) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END setRedoLogDeletionPolicy;

----------------------------------- resetAll ----------------------------------

PROCEDURE resetAll(
  transclause IN boolean DEFAULT TRUE) IS
BEGIN
  deb(DEB_PRINT, 'resetAll');
  -- reset to defaults
  setRAflags(kindMask => allKind, allRecords => FALSE);
  setAllFlag(FALSE);
  setLikePattern(NULL);
  setCompletedRange(after => NULL, before => NULL);
  resetUntil;
  setFrom(NULL);
  resetDeviceType;
  setTag(NULL);                          -- restoreTag := NULL
  setStandby(NULL);
  versionCounter := 1;                   -- for getPackageVersion
  getArchivedLogCursor := NULL;
  getBackupPieceCursor := NULL;
  getDatafileCopyCursor := NULL;
  getDatafileCursor := NULL;
  getProxyCopyCursor := NULL;
  IF (transclause) THEN
     deb(DEB_PRINT, 'reset transclause');
     resetAlTransClause;                 -- reset AlTransClause
     resetDBTransClause;                 -- reset DBTransClause
     resetDbidTransClause;               -- reset DbidTransClause
  END IF;
  resetBsRecCache(FALSE);                -- reset findvalid backupset cache
  setRcvRecBackupAge(0);                 -- reset backup age variables
  setRecoveryDestFile(FALSE);            -- reset to all files
  findSpfileBackupCursor := FALSE;
  findControlfileBackupCursor := FALSE;
  -- clear rcvRecCursor record
  rcvRecCursor.currc1.type_con := to_number(null);
  rcvRecCursor.reqfno := to_number(null);
  rcvRecCursor.reqcrescn := to_number(null);
  rcvRecCursor.reqpluginSCN := 0;
  rcvRecCursor.excludeAction := 0;
  -- clear rcvRecStack
  resetrcvRecStack;
  pname_i := 0;                          -- reset debugging
  IF findControlfileBackup_c%ISOPEN THEN CLOSE findControlfileBackup_c; END IF;
  IF findSpfileBackup_c%ISOPEN THEN CLOSE findSpfileBackup_c; END IF;
  IF findControlFileCopyKey%ISOPEN THEN CLOSE findControlFileCopyKey; END IF;
  IF findDatafileCopyKey%ISOPEN THEN CLOSE findDatafileCopyKey; END IF;
  IF findDatafileBackup_c%ISOPEN THEN CLOSE findDatafileBackup_c; END IF;
  IF findProxyCopy%ISOPEN THEN CLOSE findProxyCopy; END IF;
  IF findProxyCopyKey%ISOPEN THEN CLOSE findProxyCopyKey; END IF;
  IF findArchivedLogCopy%ISOPEN THEN CLOSE findArchivedLogCopy; END IF;
  IF findArcLogBackup%ISOPEN THEN CLOSE findArcLogBackup; END IF;
  IF findRangeArcLogBackup%ISOPEN THEN CLOSE findRangeArcLogBackup; END IF;
  IF findValidBackupSet_c%ISOPEN THEN CLOSE findValidBackupSet_c; END IF;
  IF findValidBackupSet1P_c%ISOPEN THEN CLOSE findValidBackupSet1P_c; END IF;
  IF findBackupPiece_c%ISOPEN THEN CLOSE findBackupPiece_c; END IF;
  IF findBackupPieceBpKey%ISOPEN THEN CLOSE findBackupPieceBpKey; END IF;
  IF findBackupPieceBsKey1%ISOPEN THEN CLOSE findBackupPieceBsKey1; END IF;
  IF findBackupPieceBsKey2%ISOPEN THEN CLOSE findBackupPieceBsKey2; END IF;
  IF translateDatabase_c%ISOPEN THEN CLOSE translateDatabase_c; END IF;
  IF translateTablespace_c%ISOPEN THEN CLOSE translateTablespace_c; END IF;
  IF translateDatafileName%ISOPEN THEN CLOSE translateDatafileName; END IF;
  IF translateDatafileNumber%ISOPEN THEN CLOSE translateDatafileNumber; END IF;
  IF translateDatafileCheckpoint%ISOPEN THEN
     CLOSE translateDatafileCheckpoint;
  END IF;
  IF translateAllDatafile_c%ISOPEN THEN CLOSE translateAllDatafile_c; END IF;
  IF translateCorruptList_c%ISOPEN THEN CLOSE translateCorruptList_c; END IF;
  IF translateTempfile_c%ISOPEN THEN CLOSE translateTempfile_c; END IF;
  IF translateTempfileName_c%ISOPEN THEN CLOSE translateTempfileName_c; END IF;
  IF translateTempfileNumber_c%ISOPEN THEN
     CLOSE translateTempfileNumber_c;
  END IF;
  IF translateOnlineLogs_c%ISOPEN THEN CLOSE translateOnlineLogs_c; END IF;
  IF translateArcLogKey%ISOPEN THEN CLOSE translateArcLogKey; END IF;
  IF translateArcLogName%ISOPEN THEN CLOSE translateArcLogName; END IF;
  IF translateArcLogSeqRange%ISOPEN THEN CLOSE translateArcLogSeqRange; END IF;
  IF translateArcLogSeqRange2%ISOPEN THEN
     CLOSE translateArcLogSeqRange2;
  END IF;
  IF translateArcLogTimeRange%ISOPEN THEN
     CLOSE translateArcLogTimeRange;
  END IF;
  IF translateArcLogTimeRange2%ISOPEN THEN
     CLOSE translateArcLogTimeRange2;
  END IF;
  IF translateArcLogSCNRange%ISOPEN THEN CLOSE translateArcLogSCNRange; END IF;
  IF translateArcLogSCNRange2%ISOPEN THEN
     CLOSE translateArcLogSCNRange2;
  END IF;
  IF translateArcLogPattern%ISOPEN THEN CLOSE translateArcLogPattern; END IF;
  IF lbal2%ISOPEN THEN CLOSE lbal2; END IF;
  IF ldbi%ISOPEN THEN CLOSE ldbi; END IF;
  IF lnni%ISOPEN THEN CLOSE lnni; END IF;
  IF lrtbs%ISOPEN THEN CLOSE lrtbs; END IF;
  IF getOfflineRangeCopy_c%ISOPEN THEN CLOSE getOfflineRangeCopy_c; END IF;
  IF rddf%ISOPEN THEN CLOSE rddf; END IF;
  IF translateDatabaseCorruption_c%ISOPEN THEN
     CLOSE translateDatabaseCorruption_c;
  END IF;
  IF findConfig_c%ISOPEN THEN CLOSE findConfig_c; END IF;
  IF findBackupsetFiles%ISOPEN THEN CLOSE findBackupsetFiles; END IF;
  IF findAllBackupPiece%ISOPEN THEN CLOSE findAllBackupPiece; END IF;
  IF dfBackupHistory_c1%ISOPEN THEN CLOSE dfBackupHistory_c1; END IF;
  IF dfBackupHistory_c2%ISOPEN THEN CLOSE dfBackupHistory_c2; END IF;
  IF dcBackupHistory_c%ISOPEN THEN CLOSE dcBackupHistory_c; END IF;
  IF alBackupHistory_c1%ISOPEN THEN CLOSE alBackupHistory_c1; END IF;
  IF alBackupHistory_c2%ISOPEN THEN CLOSE alBackupHistory_c2; END IF;
  IF bsBackupHistory_c1%ISOPEN THEN CLOSE bsBackupHistory_c1; END IF;
  IF getCopyofDatafile_c%ISOPEN THEN CLOSE getCopyofDatafile_c; END IF;
  IF getCopyofDatafile_c2%ISOPEN THEN CLOSE getCopyofDatafile_c2; END IF;
  IF rcvRecCursor1_c%ISOPEN THEN CLOSE rcvRecCursor1_c; END IF;
  IF rcvRecCursor1Filter_c%ISOPEN THEN CLOSE rcvRecCursor1Filter_c; END IF;
  IF rcvRecCursor2_c%ISOPEN THEN CLOSE rcvRecCursor2_c; END IF;
  IF listBackup_c%ISOPEN THEN CLOSE listBackup_c; END IF;
  getArchivedLogLast := NULL;            -- clear for next time
  getArchivedLogDoingRecovery := FALSE#; -- clear for next time
  getArchivedLogOnlyrdf := 0;
  lbacked_al_next_scn := NULL;
  standby_became_primary_scn := NULL;
  getrcvRecLast := NULL;
END resetAll;

---------------------------
-- Backup Set Validation --
---------------------------

------------------------------ findValidBackupSet -----------------------------

PROCEDURE findValidBackupSet(
  backupSetRec  IN rcvRec_t
 ,deviceType    IN varchar2 DEFAULT NULL
 ,tag           IN varchar2 DEFAULT NULL
 ,available     IN number   DEFAULT TRUE#   -- for compat.
 ,unavailable   IN number   DEFAULT FALSE#  -- for compat.
 ,deleted       IN number   DEFAULT FALSE#  -- for compat.
 ,expired       IN number   DEFAULT FALSE#  -- for compat.
 ,availableMask IN binary_integer DEFAULT NULL)  -- for compat.
IS
BEGIN
  deb(DEB_ENTER, 'findValidBackupSet');
  IF (bsRecCacheEnabled) THEN
     cacheFindValidBackupSet(
        bsRec => backupSetRec,
        deviceType => deviceType,
        tag => tag,
        availableMask => NVL(availableMask,
                             computeAvailableMask(available, unavailable,
                                                  deleted, expired)));
  ELSE
     findValidBackupSet(
        bsKey => backupSetRec.bsKey_con,
        pieceCount => backupSetRec.pieceCount_con,
        deviceType => deviceType,
        tag => tag,
        availableMask => NVL(availableMask,
                             computeAvailableMask(available, unavailable,
                                                  deleted, expired)));
  END IF;
  deb(DEB_EXIT);
END findValidBackupSet;

-- This version of findValidBackupSet takes a bsRec_t instead of a rcvRec_t.
------------------------------ findValidBackupSet -----------------------------

PROCEDURE findValidBackupSet(
  backupSetRec  IN bsRec_t
 ,deviceType    IN varchar2 DEFAULT NULL
 ,tag           IN varchar2 DEFAULT NULL
 ,available     IN number   DEFAULT TRUE#   -- for compat.
 ,unavailable   IN number   DEFAULT FALSE#  -- for compat.
 ,deleted       IN number   DEFAULT FALSE#  -- for compat.
 ,expired       IN number   DEFAULT FALSE#  -- for compat.
 ,availableMask IN binary_integer DEFAULT NULL)  -- for compat.
IS
BEGIN
  deb(DEB_ENTER, 'findValidBackupSet bsRec_t');
  findValidBackupSet(bsKey => backupSetRec.key,
                     pieceCount => backupSetRec.pieceCount,
                     deviceType => deviceType,
                     tag => tag,
                     availableMask => NVL(availableMask,
                                          computeAvailableMask(available,
                                                               unavailable,
                                                               deleted,
                                                               expired)));
  deb(DEB_EXIT);
END findValidBackupSet;

-- Public procedure for 8.1.x and prior releases.
--------------------------------- getDatafile ---------------------------------

PROCEDURE getDatafile(
  file#                 OUT number
 ,crescn                OUT number
 ,creation_time         OUT date
 ,fname                 OUT varchar2
 ,ts_name               OUT varchar2
 ,status                OUT number
 ,blksize               OUT number
 ,kbytes                OUT number
 ,blocks                OUT number
 ,unrecoverable_change# OUT number
 ,stop_change#          OUT number
 ,read_only             OUT number) IS
  dfRec dfRec_t;
BEGIN
  deb(DEB_ENTER, 'getDataFile_2');
  getDatafile(dfRec, oldClient => TRUE);
  file# := dfRec.dfNumber;
  crescn := dfRec.dfCreationSCN;
  creation_time := dfRec.dfCreationTime;
  fname := dfRec.fileName;
  ts_name := dfRec.tsName;
  status := 0;                  -- this is kccfesta, which we don't have
  blksize := dfRec.blockSize;
  kbytes := dfRec.kbytes;
  blocks := dfRec.blocks;
  unrecoverable_change# := 0;   -- this is kccfeurs which isn't kept in rcvcat
  stop_change# := dfRec.stopSCN;
  read_only := dfRec.readOnly;
  deb(DEB_EXIT);
EXCEPTION
  WHEN no_data_found THEN
     -- This is end-of-fetch.
     file# := NULL;
     deb(DEB_EXIT, 'with no more records');
END getDatafile;

-- This procedure serves absolutely no purpose. It is here only for
-- backwards compatibility with 8.1.5. The only call to this is from
-- krmkafs(), which gets called from krmkgra(). Since the calls are always
-- in sequence, we can simply save the last record returned from
-- getRecoveryAction and avoid doing an extra query.
-- The only value this function returns that krmkgra() didn't already have
-- in 8.1.5 is the xdf_key. Completion time was being estimated from the
-- stamp.
-------------------------- listTranslateProxyDFRecid --------------------------

PROCEDURE listTranslateProxyDFRecid(
  recid              IN number
 ,stamp              IN number
 ,xdf_key            OUT number
 ,file#              OUT number
 ,status             OUT varchar2
 ,handle             OUT varchar2
 ,completion_time    OUT date
 ,checkpoint_change# OUT number
 ,checkpoint_time    OUT date) IS
BEGIN
  deb(DEB_ENTER, 'listTranslateProxyDFRecid');
  -- See if the last rcvRec we returned matches the one requested here.
  -- I think it will always be the case that we can use the value we
  -- saved in rcvRec_last. But in case there is a path through Don's
  -- spaghetti code that I could not find, I include the cursor from the
  -- 8.1 package below and use it if the cached value is no good.
  IF (recid <> rcvRec_last.recid_con OR
      stamp <> rcvRec_last.stamp_con) THEN
     select d.xdf_key, d.file#, d.status, d.handle, d.completion_time,
            d.checkpoint_change#, d.checkpoint_time
     into xdf_key, file#, status, handle, completion_time,
          checkpoint_change#, checkpoint_time
     from rc_proxy_datafile d
     where db_key = this_db_key
       and ((user_site_key = d.site_key) OR
            (user_site_key IS NULL AND
             ((tape_backups_shared = TRUE#) OR
              (this_site_key = nvl(d.site_key, this_site_key)))))
       and d.recid = listTranslateProxyDFRecid.recid
       and d.stamp = listTranslateProxyDFRecid.stamp
     union all
     select c.xcf_key, 0, c.status, c.handle, c.completion_time,
            c.checkpoint_change#, c.checkpoint_time
     from rc_proxy_controlfile c
     where db_key = this_db_key
       and ((user_site_key = c.site_key) OR
            (user_site_key IS NULL AND
             ((tape_backups_shared = TRUE#) OR
              (this_site_key = nvl(c.site_key, this_site_key)))))
       and c.recid = listTranslateProxyDFRecid.recid
       and c.stamp = listTranslateProxyDFRecid.stamp;
  ELSE
     deb(DEB_PRINT, 'listTranslateProxyDFRecid: using cached rcvRec_last');
     xdf_key := rcvRec_last.key_con;
     file# := rcvRec_last.dfNumber_obj;
     status := rcvRec_last.status_con;
     handle := rcvRec_last.fileName_con;
     completion_time := rcvRec_last.compTime_con;
     checkpoint_change# := rcvRec_last.toSCN_act;
     checkpoint_time := rcvRec_last.toTime_act;
  END IF;
  deb(DEB_EXIT);
END listTranslateProxyDFRecid;

--------------------
-- Offline Ranges --
--------------------

-- Find a controlfile copy that contains the given offline range.
-- The checkpoint scn of the controlfile must be greater than or equal to
-- the online scn because the offline range record is inserted into the
-- controlfile when the file is onlined. The controlfile creation time
-- must be equal to the cursor parameter.
-- Lastly, the min_offr_recid must be
-- less than or equal to the offline range recid to guarantee that record
-- has not been written over in the controlfile copy.
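-- Illustrative example (hypothetical values): for an offline range with
-- recid 42 whose datafile came back online at SCN 5000, a controlfile copy
-- qualifies only if its checkpoint SCN is >= 5000, its creation time equals
-- the cursor parameter, and its min_offr_recid is <= 42 (i.e., record 42
-- has not yet been recycled out of that copy's record section).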
----------------------------- findOfflineRangeCopy ----------------------------

PROCEDURE findOfflineRangeCopy(
  offr_recid  IN number
 ,offr_ckpscn IN number
 ,cf_cretime  IN date
 ,dbinc_key   IN number) IS
BEGIN
  deb(DEB_ENTER, 'findOfflineRangeCopy');
  validateState(null);
  deb(DEB_OPEN, 'getOfflineRangeCopy_c');
  OPEN getOfflineRangeCopy_c(offrRecid => offr_recid,
                             offrCkpSCN => offr_ckpscn,
                             cfCreTime => cf_cretime,
                             dbincKey => dbinc_key);
  deb(DEB_EXIT);
END findOfflineRangeCopy;

----------------------------- getOfflineRangeCopy -----------------------------

PROCEDURE getOfflineRangeCopy(
  rcvRec OUT NOCOPY rcvRec_t) IS
BEGIN
  deb(DEB_ENTER, 'getOfflineRangeCopy');
  IF (NOT getOfflineRangeCopy_c%ISOPEN) THEN
     deb(DEB_EXIT, 'with error 20204');
     raise_application_error(-20204, 'Translation not started');
  END IF;
  FETCH getOfflineRangeCopy_c INTO rcvRec;
  IF (getOfflineRangeCopy_c%NOTFOUND) THEN
     CLOSE getOfflineRangeCopy_c;
     deb(DEB_EXIT, 'with no more records');
     RAISE no_data_found;
  END IF;
  CLOSE getOfflineRangeCopy_c;
  deb(DEB_EXIT);
END getOfflineRangeCopy;

-- Obsolete as of 8.1.6
----------------------------- getOfflineRangeCopy -----------------------------

FUNCTION getOfflineRangeCopy RETURN varchar2 IS
  rcvRec rcvRec_t;
BEGIN
  deb(DEB_ENTER, 'getOfflineRangeCopy815');
  getOfflineRangeCopy(rcvRec);
  deb(DEB_EXIT, 'with: fileName');
  RETURN rcvRec.fileName_con;
EXCEPTION
  WHEN no_data_found THEN
     deb(DEB_EXIT, 'with: NULL');
     RETURN NULL;
END getOfflineRangeCopy;

---------------------------------------
-- Recovery Functions and Procedures --
---------------------------------------

---------------------------- computeRecoveryActions ---------------------------

FUNCTION computeRecoveryActions(
  fno             IN number,      -- Datafile number.
  crescn          IN number,      -- Datafile creation SCN.
  df_rlgscn       IN number       -- Datafile resetlogs SCN. Null unless we are
     default null,                -- doing a RECOVER, in which case it is the
                                  -- value in the datafile header.
  df_rlgtime      IN date         -- Datafile resetlogs time. Null if df_rlgscn
     default null,                -- is null, else value from datafile header.
  df_ckpscn       IN number       -- Datafile checkpoint SCN. Null if df_rlgscn
     default null,                -- is null, else value from datafile header.
  offlscn         IN number       -- kccfeofs (0 -> no offline range)
     default 0,
  onlscn          IN number       -- kccfeonc (0 if offlscn is 0).
     default 0,
  onltime         IN date         -- kccfonc_time
     default null,
  cleanscn        IN number       -- kccfecps if either SOR or WCC set, else 0.
     default 0,
  clean2scn       IN number       -- CF ckpt SCN if WCC set, infinity if SOR
     default 0,                   -- bit set, else 0.
  clean2time      IN date         -- controlfile ckpt time if WCC, SYSDATE if
     default null,                -- SOR; this is ignored if cleanscn is 0
  allowfuzzy      IN boolean      -- TRUE if can be fuzzy at until SCN/time,
     default FALSE,               -- FALSE if not. default is FALSE.
  partial_rcv     IN boolean      -- TRUE if can do partial recovery, FALSE if
     default FALSE,               -- not.
          -- A partial recovery would be to recover a datafile
          -- using redo up to some SCN, and then switching
          -- back to incrementals. This would be done
          -- because of a missing/unavailable incremental backup.
          -- We need to know because a partial
          -- media recovery can only be done if we have a
          -- current controlfile. A partial recovery does not
          -- recover the controlfile. This could be implemented,
          -- but it requires using an enqueue to ensure only
          -- 1 process tries to recover the controlfile.
          -- Since we aren't recovering the controlfile,
          -- the file header won't be handled properly when
          -- we hit controlfile redo. That is why we are
          -- requiring a current controlfile.
          -- The default is false because RMAN currently does
          -- not support partial recovery.
  cf_scn          IN number       -- controlfile ckpt SCN (NULL if none mounted)
     default NULL,
  cf_cretime      IN date         -- controlfile creation time (NULL if none
     default NULL,                -- mounted)
  cf_offrrid      IN number       -- recid of oldest offline range in
     default NULL,                -- controlfile (NULL if none mounted)
  allCopies       IN boolean default FALSE,
  df_cretime      IN DATE default NULL,
  rmanCmd         IN binary_integer default unknownCmd_t,
  foreignDbid     IN number default 0,
  pluggedRonly    IN binary_integer default 0,
  pluginSCN       IN number default 0,
  pluginRlgSCN    IN number default 0,
  pluginRlgTime   IN date default NULL,
  creation_thread IN number default NULL,
  creation_size   IN number default NULL)
RETURN binary_integer IS
  rc            boolean;
  last          number;
  this          number;
  succ_flag     boolean;
  thisAct       rcvRec_t;
  doingRecovery boolean;
  lrmanCmd      binary_integer := rmanCmd;
  target_scn    number := to_number(null);
BEGIN
  deb(DEB_ENTER, 'computeRecoveryActions');
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  deb(DEB_IN, ' fno: '||fno);
  deb(DEB_IN, ' crescn: '||crescn);
  deb(DEB_IN, ' df_rlgscn: '||df_rlgscn);
  deb(DEB_IN, ' df_ckpscn: '||to_char(df_ckpscn));
  deb(DEB_IN, ' offlscn: '||to_char(offlscn));
  deb(DEB_IN, ' onlscn: '||to_char(onlscn));
  deb(DEB_IN, ' cleanscn: '||to_char(cleanscn));
  deb(DEB_IN, ' clean2scn: '||to_char(clean2scn));
  deb(DEB_IN, ' cf_scn: '||to_char(cf_scn));
  deb(DEB_IN, ' cf_offrrid: '||to_char(cf_offrrid));
  deb(DEB_IN, ' foreignDbid: '||to_char(foreignDbid));
  deb(DEB_IN, ' pluggedRonly: '||to_char(pluggedRonly));
  deb(DEB_IN, ' pluginSCN: '||to_char(pluginSCN));
  deb(DEB_IN, ' pluginRlgSCN: '||to_char(pluginRlgSCN));
  deb(DEB_IN, ' creation_thread: '||to_char(creation_thread));
  deb(DEB_IN, ' creation_size: '||to_char(creation_size));
  resetrcvRecStack;
  computeRA_restorable := FALSE;
  computeRA_available := FALSE;
  computeRA_rcvCopy_avail := FALSE;
  IF (allCopies) THEN
     deb(DEB_IN, 'allCopies is TRUE');
  ELSE
     deb(DEB_IN, 'allCopies is FALSE');
  END IF;
  IF (df_cretime is null AND                 -- pre10g rman client
      lrmanCmd = unknownCmd_t AND
      df_ckpscn is not null) THEN
     deb(DEB_IN, 'rmanCmd set to recoverCmd_t for pre10g rman');
     lrmanCmd := recoverCmd_t;
  END IF;
  IF (lrmanCmd = rcvCopyCmd_t) THEN
     deb(DEB_PRINT, 'doing recover copy');
     doingRecovery := TRUE;
  ELSIF (lrmanCmd = recoverCmd_t) THEN
     deb(DEB_PRINT, 'doing recover');
     doingRecovery := TRUE;
  ELSIF (lrmanCmd = obsoleteCmd_t) THEN
     deb(DEB_PRINT, 'doing report obsolete');
     doingRecovery := NULL;
     IF (allCopies) THEN
        -- Calling it with allCopies forces addAction to add duplicate
        -- backup sets (i.e., with different copy numbers). These duplicate
        -- backup sets must never be considered for the obsolete command.
        -- Instead, you must set computeRA_fullBackups.
        raise_application_error(-20999,
           'internal error: computeRecoveryActions ' ||
           'obsoleteCmd cannot be called with allCopies');
     END IF;
     IF (tc_database = FALSE#) THEN
        -- rcvRecCursor1 is optimized for all files by skipping the call
        -- to isTranslatedFno.
        raise_application_error(-20999,
           'internal error: computeRecoveryActions ' ||
           'obsoleteCmd cannot be called for specific files');
     END IF;
  ELSIF (lrmanCmd = restoreCmd_t) THEN
     deb(DEB_PRINT, 'doing restore');
     doingRecovery := NULL;
  ELSIF (lrmanCmd = blkRestoreCmd_t) THEN
     deb(DEB_PRINT, 'doing block restore');
     doingRecovery := FALSE;
     target_scn := df_ckpscn;
  ELSIF (lrmanCmd = unknownCmd_t) THEN
     deb(DEB_PRINT, 'command unknown or called by pre-10i rman');
     doingRecovery := NULL;
  ELSE
     raise_application_error(-20999,
        'internal error: computeRecoveryActions' ||
        ' rmanCmd='||nvl(to_char(lrmanCmd), 'NULL'));
  END IF;
  deb(DEB_IN, 'this_dbinc_key is:'||to_char(this_dbinc_key));
  rc := computeRecoveryActions2(fno, crescn, df_cretime, df_rlgscn,
                                df_rlgtime, df_ckpscn, offlscn, onlscn,
                                onltime, cleanscn, clean2scn, clean2time,
                                allowfuzzy, partial_rcv, target_scn,
                                this_dbinc_key, cf_scn, cf_cretime,
                                cf_offrrid, FALSE, succ_flag, allCopies,
                                doingRecovery, lrmanCmd, foreignDbid,
                                pluggedRonly, pluginSCN, pluginRlgSCN,
                                pluginRlgTime, creation_thread,
                                creation_size);
  IF (succ_flag) THEN
     IF (rcvRecStack.count > 0) THEN
        IF (computeRA_allRecords = FALSE#) THEN
           -- We need to find any incremental backups that are redundant
           -- and mark them to be skipped. It is possible that we
           -- have stacked an incremental backup that we can skip because
           -- it overlaps two other incremental backups.
           -- We can delete action n if action n - 1 can be applied on
           -- top of action n + 1, i.e., if toSCN(n+1) >= fromSCN(n-1).
           -- Note that the actions on the stack, from top..1, are in
           -- descending toSCN order. This means n+1 comes before n-1.
           -- Loop from top-1..2. These are the "middle" actions. Go in
           -- reverse order since that is the order in which the stack was
           -- built. "last" is the last action we know we are keeping.
           -- We know we are keeping the action at the top of the stack, as it
           -- is always a full backup. So "last" starts at the stack top.
           -- "last" is n+1 in the formula above.
           last := rcvRecStack.count;
           deb(DEB_IN,'computeRecoveryActions: Top of stack='||
               rcvRecStack.count);
           FOR this IN REVERSE 2..rcvRecStack.count - 1 LOOP
              IF ((rcvRecStack(last).toSCN_act >=
                   rcvRecStack(this-1).fromSCN_act)
                  AND NOT
                  -- Keep it if allCopies and it is the same container.
                  (allCopies AND
                   ((rcvRecStack(last).key_con = rcvRecStack(this).key_con AND
                     rcvRecStack(last).type_con = rcvRecStack(this).type_con)
                    OR
                    (rcvRecStack(this-1).key_con = rcvRecStack(this).key_con
                     AND rcvRecStack(this-1).type_con =
                         rcvRecStack(this).type_con)))) THEN
                 deb(DEB_PRINT,
                     'computeRecoveryActions: marking this action deleted:');
                 rcvRecGet(this, thisAct);
                 IF (debug) THEN
                    printRcvRec(thisAct);
                 END IF;
                 rcvRecStack(this).type_con :=
                    rcvRecStack(this).type_con + deleted_con_t;
              ELSE
                 -- We are going to keep this action, so it becomes the last.
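                 -- (Illustrative, hypothetical SCNs: if the kept action
                 -- "last" has toSCN 500 and action this-1 has fromSCN 600,
                 -- only this middle incremental can bridge 500..600, so we
                 -- keep it; had fromSCN(this-1) been 450, the branch above
                 -- would have marked this action deleted instead.)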
                 last := this;
              END IF;
           END LOOP;
        END IF;              -- computeRA_allRecords = FALSE#
        deb(DEB_EXIT, 'with: SUCCESS');
        RETURN SUCCESS;
     ELSE
        deb(DEB_EXIT, 'with: NO_ACTION');
        RETURN NO_ACTION;    -- a recovery that can only use redo
     END IF;
  ELSIF (computeRA_available) THEN
     deb(DEB_EXIT, 'with: AVAILABLE');
     RETURN dbms_rcvman.AVAILABLE;
  ELSIF (computeRA_restorable) THEN
     deb(DEB_EXIT, 'with: RESTORABLE');
     RETURN RESTORABLE;
  ELSE
     deb(DEB_EXIT, 'with: UNAVAILABLE');
     RETURN dbms_rcvman.UNAVAILABLE;
  END IF;
END computeRecoveryActions;

---------------------
-- Report Obsolete --
---------------------

-- 8.1.5+ version
-------------------------------- reportGetDFDel -------------------------------

FUNCTION reportGetDFDel(
  file#               OUT number
 ,filetype            OUT number
 ,checkpoint_change#  OUT number
 ,checkpoint_time     OUT date
 ,resetlogs_change#   OUT number
 ,resetlogs_time      OUT date
 ,incremental_change# OUT number
 ,fuzzy_change#       OUT number
 ,recid               OUT number
 ,stamp               OUT number
 ,fname               OUT varchar2
 ,restorable          OUT number
 ,key                 OUT number
 ,completion_time     OUT date) RETURN number IS
  device_type rc_backup_piece.device_type%TYPE;
  mytype      number;
  set_stamp   number;
  set_count   number;
  pref        number;
  bsRec       bsRec_t;
  validRec    validBackupSetRec_t;
  rcvRec      rcvRec_t;
BEGIN
  deb(DEB_ENTER, 'reportGetDFDel');
  FETCH rddf INTO pref, file#, mytype, checkpoint_change#, checkpoint_time,
                  resetlogs_change#, resetlogs_time, incremental_change#,
                  fuzzy_change#, recid, stamp, fname, set_stamp, set_count,
                  key, completion_time, device_type;
  filetype := mytype;
  IF (rddf%found) THEN
     IF (mytype in (FULL_DF_BACKUP, INCREMENTAL_DF_BACKUP)) THEN
        findBackupSet(recid => recid, stamp => stamp, bsRec => bsRec);
        -- This is a bit dangerous. We are hacking together a rcvRec
        -- from a bsRec.
        rcvRec.bsKey_con := bsRec.key;
        rcvRec.elapseSecs_con := bsRec.elapseSecs;
        rcvRec.pieceCount_con := bsRec.pieceCount;
        restorable := validateBackupSet(
                         backupSetRec => rcvRec,
                         checkDeviceIsAllocated => TRUE,
                         availableMask => dbms_rcvman.BSavailable,
                         validRec => validRec);
     ELSIF (mytype = OFFLINE_RANGE) THEN
        restorable := SUCCESS;
     ELSE
        IF (anyDevice = TRUE# OR
            isDeviceTypeAllocated(device_type) = TRUE#) THEN
           restorable := SUCCESS;
        ELSE
           restorable := dbms_rcvman.AVAILABLE;
        END IF;
     END IF;
     deb(DEB_EXIT, 'with: '||TRUE#);
     RETURN TRUE#;
  ELSE
     CLOSE rddf;
     deb(DEB_EXIT, 'with: '||FALSE#);
     RETURN FALSE#;
  END IF;
END reportGetDFDel;

------------
-- TSPITR --
------------

--------------------------------- getCloneName --------------------------------

FUNCTION getCloneName(
  fno    IN number
 ,crescn IN number
 ,pluscn IN number DEFAULT 0) RETURN varchar2 IS
  fname df.clone_fname%TYPE;
BEGIN
  deb(DEB_ENTER, 'getCloneName');
  IF (this_dbinc_key is NULL) THEN
     deb(DEB_EXIT, 'with error 20020');
     raise_application_error(-20020, 'Database incarnation not set');
  END IF;
  SELECT clone_fname INTO fname FROM df
  WHERE dbinc_key = this_dbinc_key
    AND file# = fno
    AND create_scn = crescn
    AND plugin_scn = pluscn;
  deb(DEB_EXIT, 'with: '||fname);
  RETURN fname;
EXCEPTION
  WHEN no_data_found THEN
     -- This should never happen. The fno and crescn values came from
     -- the recovery catalog, so we should have this record.
     deb(DEB_EXIT, 'with error 20218');
     raise_application_error(-20218, 'Datafile not found in recovery catalog');
  WHEN others THEN
     deb(DEB_EXIT, 'Just raising error');
     raise;
END getCloneName;

---------------
-- DUPLICATE --
---------------

--------------------------------- wasFileOffline ------------------------------

FUNCTION wasFileOffline(
  fno      IN number
 ,untilscn IN number) RETURN number IS
  x number;
BEGIN
  deb(DEB_ENTER, 'wasFileOffline');
  select 1 into x
  from rc_offline_range ofr, rc_database_incarnation di
  where ofr.db_key = this_db_key
    and di.db_key = this_db_key
    and ofr.dbinc_key = di.dbinc_key
    and untilscn >= offline_change#
    and untilscn < online_change#
    and file# = fno;
  deb(DEB_EXIT, 'with: TRUE#');
  RETURN TRUE#;
EXCEPTION
  WHEN no_data_found THEN
     deb(DEB_EXIT, 'with: FALSE#');
     RETURN FALSE#;
END wasFileOffline;

----------------------------------
-- Get max(copy#) backup piece --
----------------------------------

--------------------------------- getmaxcopyno --------------------------------

FUNCTION getmaxcopyno(
  bsstamp IN number
 ,bscount IN number) RETURN number IS
  maxcopy number;
BEGIN
  select max(copy#) into maxcopy
  from rc_backup_piece bp
  where bp.set_stamp = bsstamp
    and bp.set_count = bscount
    -- Return the max copy number for the backup set. We should not filter
    -- on backup pieces that are local on disk in a Data Guard environment,
    -- because during BACKUP BACKUPSET the highest copy number across all
    -- sites must be returned. Otherwise, the copy number for some pieces
    -- may be duplicated across sites, causing problems later when
    -- reconciling the numbers after ownership is transferred.
    and bp.db_key = this_db_key;
  return maxcopy;
END getmaxcopyno;

--------------------------- getMaxDfNumber ------------------------------------

FUNCTION getMaxDfNumber RETURN number IS
  maxfile# number;
BEGIN
  select max(file#) into maxfile# from df;
  return maxfile#;
END getMaxDfNumber;

----------------------------- getdropOSFiles ----------------------------------

PROCEDURE getdropOSFiles(
  first       IN boolean
 ,agedFileRec OUT NOCOPY agedFileRec_t) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getdropOSFiles;

----------------------------- getBackedUpFiles --------------------------------

PROCEDURE getBackedUpFiles(
  first       IN boolean
 ,agedFileRec OUT NOCOPY agedFileRec_t) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getBackedUpFiles;

-------------------------- validateStandbyConfig ------------------------------

FUNCTION validateStandbyConfig(
  policy  IN varchar2
 ,alldest IN number) RETURN NUMBER IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
  return dbms_rcvman.FALSE#;
END validateStandbyConfig;

-------------------------- getSCNForAppliedPolicy -----------------------------

PROCEDURE getSCNForAppliedPolicy(
  minscn OUT number
 ,rlgscn OUT number) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getSCNForAppliedPolicy;

----------------------------- getAppliedAl ------------------------------------

PROCEDURE getAppliedAl(
  first       IN boolean
 ,agedFileRec OUT NOCOPY agedFileRec_t) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getAppliedAl;

-------------------------- getRequiredSCN -------------------------------------

PROCEDURE getRequiredSCN(
  reqscn  OUT number
 ,rlgscn  OUT number
 ,streams IN number DEFAULT 0
 ,alldest IN number DEFAULT 0) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getRequiredSCN;

-------------------------- getAppliedSCN --------------------------------------
PROCEDURE getAppliedSCN(
  appscn  OUT number
 ,rlgscn  OUT number
 ,alldest IN number) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getAppliedSCN;

----------------------------- isBsRecCacheMatch -------------------------------

FUNCTION isBsRecCacheMatch(
  key        IN number
 ,deviceType IN varchar2
 ,tag        IN varchar2
 ,status     IN varchar2) RETURN NUMBER IS
  bucket     number;
  sb4_bucket binary_integer;
BEGIN
  bucket := mod(key, CONST4GVAL);
  IF (bucket >= CONST2GVAL) THEN
     sb4_bucket := CONST2GVAL - bucket;
  ELSE
     sb4_bucket := bucket;
  END IF;
  IF (NOT cacheBsRecTable.bsRec.exists(sb4_bucket)) THEN
     RETURN FALSE#;
  END IF;
  FOR i in 1..cacheBsRecTable.bsRec(sb4_bucket).bslist.count LOOP
     IF (cacheBsRecTable.bsRec(sb4_bucket).bslist(i).bskey = key) THEN
        -- Does status match?
        IF (cacheBsRecTable.mask = BSavailable) THEN
           IF (status != 'A') THEN
              RETURN FALSE#;
           END IF;
        ELSIF (isStatusMatch(status, cacheBsRecTable.mask) = FALSE#) THEN
           RETURN FALSE#;
        END IF;
        -- Does device type match?
        IF (deviceType != cacheBsRecTable.deviceType) THEN
           RETURN FALSE#;
        END IF;
        -- Does tag match?
        IF (nvl(tag, ' ') != nvl(cacheBsRecTable.tag, nvl(tag, ' '))) THEN
           RETURN FALSE#;
        END IF;
        -- Remember that mixcopy can produce a usable set
        cacheBsRecTable.bsRec(sb4_bucket).bslist(i).mixcopy := TRUE;
        RETURN TRUE#;
     END IF;
  END LOOP;
  RETURN FALSE#;
END isBsRecCacheMatch;

-------------------------------- resetReclRecid -------------------------------

PROCEDURE resetReclRecid IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END resetReclRecid;

-------------------------------- setReclRecid ---------------------------------

PROCEDURE setReclRecid(
  rectype IN binary_integer
 ,recid   IN number) IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END setReclRecid;

-------------------------------- IsReclRecid ----------------------------------

FUNCTION IsReclRecid(
  rectype IN binary_integer
 ,recid   IN number) RETURN NUMBER IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END IsReclRecid;

-------------------------------- getSpaceRecl ---------------------------------

FUNCTION getSpaceRecl(ceilAsm IN binary_integer default 0) RETURN NUMBER IS
BEGIN
  raise_application_error(-20999, 'Not supported in recovery catalog');
END getSpaceRecl;

------------------------------ getFlashbackInfo -------------------------------
-- Return flashback until time and minimum guaranteed restore point.
-- If null or not found, return the maximum value.

PROCEDURE getFlashbackInfo(
  fbUntilTime OUT DATE
 ,minGrsp     OUT NUMBER) IS
  clean_grsp number;
  count_grsp number;
BEGIN
  BEGIN
    SELECT nvl(oldest_flashback_time, MAXDATEVAL) INTO fbUntilTime
    FROM fb
    WHERE dbinc_key = this_dbinc_key
      AND db_unique_name = this_db_unique_name;
  EXCEPTION
    WHEN no_data_found THEN
       fbUntilTime := MAXDATEVAL;
  END;
  BEGIN
    SELECT min(to_scn), count(*),
           count(case when from_scn <= to_scn then 1 end)
    INTO minGrsp, count_grsp, clean_grsp
    FROM grsp, dbinc
    WHERE grsp.dbinc_key = dbinc.dbinc_key
      AND dbinc.db_key = this_db_key
      AND grsp.site_key = this_site_key
      AND grsp.guaranteed = 'YES'
      AND from_scn != 0;
    -- If there is just one grsp and it is clean, then no archivelogs or
    -- backups need to be kept.
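    -- Illustrative example (hypothetical values): if the only guaranteed
    -- restore point is "clean" (its from_scn/to_scn range requires no
    -- redo), minGrsp is raised to MAXSCNVAL below so that no archivelogs
    -- or backups are retained on its behalf; otherwise minGrsp remains
    -- the smallest to_scn among the guaranteed restore points.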
IF (clean_grsp = 1 AND count_grsp = 1) THEN minGrsp := MAXSCNVAL; END IF; EXCEPTION WHEN no_data_found THEN minGrsp := MAXSCNVAL; END; deb(DEB_PRINT, 'getFlashbackInfo: fbUntilTime=' || to_char(fbUntilTime) || ' minGrsp=' || minGrsp); END getFlashbackInfo; ------------------------------ openLbCursor ---------------------------------- PROCEDURE openLbCursor(lbc OUT NOCOPY lbCursor_t) IS BEGIN IF (lbc%ISOPEN) THEN CLOSE lbc; END IF; OPEN lbc FOR SELECT -- Backup Sets bs.bs_key list_order1, 0 list_order2, bs.bs_key pkey, backupset_txt backup_type, backupset_txt file_type, decode(bs.keep_options, 0, 'NO', 'YES') keep, bs.keep_until keep_until, decode(bs.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', null) keep_options, null status, null fname, null tag, null media, bs.bs_recid recid, bs.bs_stamp stamp, null device_type, 0 block_size, bs.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, null bytes, bs.bs_key bs_key, bs.set_count bs_count, bs.set_stamp bs_stamp, decode(bs.bck_type, 'L', archivedlog_txt, datafile_txt) bs_type, decode(bs.incr_level, 0, full_txt, 1, incr1_txt, 2, incr2_txt, 3, incr3_txt, 4, incr4_txt, decode(bs.bck_type, 'I', incr_txt, full_txt)) bs_incr_type, bs.pieces bs_pieces, null bs_copies, bs.completion_time bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bs WHERE bs.db_key = this_db_key UNION ALL SELECT -- Backup Pieces bp.bs_key list_order1, 1 list_order2, bp.bp_key pkey, backupset_txt backup_type, piece_txt file_type, null keep, null keep_until, null keep_options, decode(bp.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, bp.handle fname, bp.tag tag, bp.media media, bp.bp_recid recid, bp.bp_stamp stamp, bp.device_type device_type, 0 block_size, bp.completion_time completion_time, bp.is_recovery_dest_file is_rdf, bp.compressed compressed, null obsolete, null keep_for_dbpitr, bp.bytes bytes, bp.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, bp.piece# bp_piece#, bp.copy# bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bp WHERE bp.db_key = this_db_key AND ((user_site_key = bp.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE# AND bp.device_type = 'DISK') OR (tape_backups_shared = TRUE# AND bp.device_type <>'DISK') OR (this_site_key = nvl(bp.site_key, this_site_key))))) UNION ALL SELECT -- Backup Datafile bdf.bs_key list_order1, 2 list_order2, bdf.bdf_key pkey, backupset_txt backup_type, datafile_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bdf.bdf_recid recid, bdf.bdf_stamp stamp, null device_type, 
bdf.block_size block_size, bdf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bdf.block_size * bdf.blocks bytes, bdf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, decode(bdf.incr_level, 0, full_txt, 1, incr1_txt, 2, incr2_txt, 3, incr3_txt, 4, incr4_txt, decode(greatest(bdf.create_scn, bdf.incr_scn), bdf.create_scn, full_txt, incr_txt)) bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, bdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, bdf.create_scn df_creation_change#, bdf.ckp_scn df_checkpoint_change#, bdf.ckp_time df_ckp_mod_time, bdf.incr_scn df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = bdf.dbinc_key UNION ALL SELECT -- Backup Controlfile bcf.bs_key list_order1, 2 list_order2, bcf.bcf_key pkey, backupset_txt backup_type, controlfile_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bcf.bcf_recid recid, bcf.bcf_stamp stamp, null device_type, bcf.block_size block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bcf.block_size * bcf.blocks bytes, bcf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, full_txt bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, bcf.ckp_scn df_checkpoint_change#, bcf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM bcf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = bcf.dbinc_key UNION ALL SELECT -- Backup Redo Log brl.bs_key list_order1, 2 list_order2, brl.brl_key pkey, backupset_txt backup_type, archivedlog_txt file_type, null keep, null keep_until, null keep_options, null status, null fname, null tag, null media, brl.brl_recid recid, brl.brl_stamp stamp, null device_type, brl.block_size block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, brl.block_size * brl.blocks bytes, brl.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, brl.thread# rl_thread#, brl.sequence# rl_sequence#, dbinc.reset_scn rl_resetlogs_change#, brl.low_scn rl_first_change#, brl.low_time rl_first_time, brl.next_scn rl_next_change#, brl.next_time rl_next_time, null sf_db_unique_name FROM brl, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = brl.dbinc_key UNION ALL SELECT -- Backup spfile bsf.bs_key list_order1, 2 list_order2, bsf.bsf_key pkey, backupset_txt backup_type, spfile_txt file_type, null 
keep, null keep_until, null keep_options, null status, null fname, null tag, null media, bsf.bsf_recid recid, bsf.bsf_stamp stamp, null device_type, 0 block_size, null completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, bsf.bytes bytes, bsf.bs_key bs_key, null bs_count, null bs_stamp, null bs_type, full_txt bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, 0 df_resetlogs_change#, 0 df_creation_change#, 0 df_checkpoint_change#, bsf.modification_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, db_unique_name sf_db_unique_name FROM bsf WHERE bsf.db_key = this_db_key UNION ALL SELECT -- Datafile Copy cdf.cdf_key list_order1, -1 list_order2, cdf.cdf_key pkey, copy_txt backup_type, datafile_txt file_type, decode(cdf.keep_options, 0, 'NO', 'YES') keep, cdf.keep_until keep_until, decode(cdf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(cdf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, cdf.fname fname, cdf.tag tag, null media, cdf.cdf_recid recid, cdf.cdf_stamp stamp, 'DISK' device_type, cdf.block_size block_size, cdf.completion_time completion_time, cdf.is_recovery_dest_file is_rdf, null compressed, null obsolete, null keep_for_dbpitr, cdf.block_size * cdf.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, cdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, cdf.create_scn df_creation_change#, cdf.ckp_scn df_checkpoint_change#, cdf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM cdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = cdf.dbinc_key AND ((user_site_key = cdf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(cdf.site_key, this_site_key))))) UNION ALL SELECT -- Controlfile Copy ccf.ccf_key list_order1, -1 list_order2, ccf.ccf_key pkey, copy_txt backup_type, controlfile_txt file_type, decode(ccf.keep_options, 0, 'NO', 'YES') keep, ccf.keep_until keep_until, decode(ccf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(ccf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, ccf.fname fname, ccf.tag tag, null media, ccf.ccf_recid recid, ccf.ccf_stamp stamp, 'DISK' device_type, ccf.block_size block_size, ccf.completion_time completion_time, ccf.is_recovery_dest_file is_rdf, null compressed, null obsolete, null keep_for_dbpitr, null bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, ccf.ckp_scn df_checkpoint_change#, ccf.ckp_time 
df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM ccf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = ccf.dbinc_key AND ((user_site_key = ccf.site_key) OR (user_site_key IS NULL AND ((disk_backups_shared = TRUE#) OR (this_site_key = nvl(ccf.site_key, this_site_key))))) UNION ALL SELECT -- Archived Redo Log al.al_key list_order1, -1 list_order2, al.al_key pkey, copy_txt backup_type, archivedlog_txt file_type, null keep, null keep_until, null keep_options, decode(al.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, al.fname fname, null tag, null media, al.al_recid recid, al.al_stamp stamp, 'DISK' device_type, al.block_size block_size, al.completion_time completion_time, al.is_recovery_dest_file is_rdf, al.compressed compressed, null obsolete, null keep_for_dbpitr, al.block_size * al.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, null df_file#, null df_tablespace, null df_resetlogs_change#, null df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, al.thread# rl_thread#, al.sequence# rl_sequence#, dbinc.reset_scn rl_resetlogs_change#, al.low_scn rl_first_change#, al.low_time rl_first_time, al.next_scn rl_next_change#, al.next_time rl_next_time, null sf_db_unique_name FROM dbinc, al LEFT OUTER JOIN grsp ON al.next_scn >= grsp.from_scn AND al.low_scn <= (grsp.to_scn + 1) AND al.dbinc_key = grsp.dbinc_key AND grsp.from_scn <= grsp.to_scn -- filter clean grp AND grsp.from_scn != 0 AND grsp.guaranteed = 'YES' WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = al.dbinc_key AND al.archived = 'Y' AND (lb_NeedObsoleteData =TRUE#) AND grsp.from_scn is null AND ((client_site_aware = TRUE# AND ((user_site_key = al.site_key) OR -- interested in specific site (user_site_key IS NULL AND ((logs_shared = TRUE#) OR (this_site_key = nvl(al.site_key, this_site_key)))))) OR (client_site_aware = FALSE#)) UNION ALL SELECT -- Datafile Proxy Copy xdf.xdf_key list_order1, -1 list_order2, xdf.xdf_key pkey, proxycopy_txt backup_type, datafile_txt file_type, decode(xdf.keep_options, 0, 'NO', 'YES') keep, xdf.keep_until keep_until, decode(xdf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xdf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, xdf.handle fname, xdf.tag tag, xdf.media media, xdf.xdf_recid recid, xdf.xdf_stamp stamp, xdf.device_type device_type, xdf.block_size block_size, xdf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, xdf.block_size * xdf.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, xdf.file# df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, xdf.create_scn df_creation_change#, xdf.ckp_scn df_checkpoint_change#, xdf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null 
rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM xdf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = xdf.dbinc_key AND ((user_site_key = xdf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xdf.site_key, this_site_key))))) UNION ALL SELECT -- Controlfile Proxy Copy xcf.xcf_key list_order1, -1 list_order2, xcf.xcf_key pkey, proxycopy_txt backup_type, controlfile_txt file_type, decode(xcf.keep_options, 0, 'NO', 'YES') keep, xcf.keep_until keep_until, decode(xcf.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xcf.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, xcf.handle fname, xcf.tag tag, xcf.media media, xcf.xcf_recid recid, xcf.xcf_stamp stamp, xcf.device_type device_type, xcf.block_size block_size, xcf.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, null bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, xcf.ckp_scn df_checkpoint_change#, xcf.ckp_time df_ckp_mod_time, null df_incremental_change#, null rl_thread#, null rl_sequence#, null rl_resetlogs_change#, null rl_first_change#, null rl_first_time, null rl_next_change#, null rl_next_time, null sf_db_unique_name FROM xcf, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = xcf.dbinc_key AND ((user_site_key = xcf.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xcf.site_key, this_site_key))))) UNION ALL SELECT -- Archivelog Proxy Copy xal.xal_key list_order1, -1 list_order2, xal.xal_key pkey, proxycopy_txt backup_type, archivedlog_txt file_type, decode(xal.keep_options, 0, 'NO', 'YES') keep, xal.keep_until keep_until, decode(xal.keep_options, 256, 'LOGS', 512, 'NOLOGS', 1024, 'BACKUP_LOGS', NULL) keep_options, decode(xal.status, 'A', available_txt, 'U', unavailable_txt, 'X', expired_txt, other_txt) status, xal.handle fname, xal.tag tag, xal.media media, xal.xal_recid recid, xal.xal_stamp stamp, xal.device_type device_type, xal.block_size block_size, xal.completion_time completion_time, 'NO' is_rdf, null compressed, null obsolete, null keep_for_dbpitr, xal.block_size * xal.blocks bytes, null bs_key, null bs_count, null bs_stamp, null bs_type, null bs_incr_type, null bs_pieces, null bs_copies, null bs_completion_time, null bs_status, null bs_bytes, null bs_compressed, null bs_tag, null bs_device_type, null bp_piece#, null bp_copy#, 0 df_file#, null df_tablespace, dbinc.reset_scn df_resetlogs_change#, 0 df_creation_change#, null df_checkpoint_change#, null df_ckp_mod_time, null df_incremental_change#, xal.thread# rl_thread#, xal.sequence# rl_sequence#, dbinc.reset_scn rl_resetlogs_change#, xal.low_scn rl_first_change#, xal.low_time rl_first_time, xal.next_scn rl_next_change#, xal.next_time rl_next_time, null sf_db_unique_name FROM xal, dbinc WHERE dbinc.db_key = this_db_key AND dbinc.dbinc_key = xal.dbinc_key AND ((user_site_key = xal.site_key) OR (user_site_key IS NULL AND ((tape_backups_shared = TRUE#) OR (this_site_key = nvl(xal.site_key, this_site_key))))) -- We order by list_order so that objects from the same backup set -- (backup datafile, backup set, and piece records)
come together. ORDER BY list_order1, list_order2, bp_piece#; END openLbCursor; ------------------------------ DbUniqueNameIsStandby ------------------------- -- returns a non-zero value if the current target database is standby FUNCTION DbUniqueNameIsStandby RETURN NUMBER IS is_standby number; BEGIN deb(DEB_ENTER, 'DbUniqueNameIsStandby'); IF NOT this_dummy_instance THEN -- if the connected database site is STANDBY return TRUE, otherwise -- return FALSE. SELECT count(*) INTO is_standby FROM node WHERE node.db_key = this_db_key AND node.db_unique_name = this_db_unique_name AND node.database_role = 'STANDBY'; END IF; IF is_standby is NULL THEN is_standby := 0; END IF; deb(DEB_EXIT, ' with ' || is_standby); return is_standby; END DbUniqueNameIsStandby; ------------------------------ clrSiteName ----------------------------------- -- clear the state of package variables that control file name translation for -- file sharing attributes and specific site file name requests. PROCEDURE clrSiteName IS BEGIN deb(DEB_ENTER, 'clrSiteName, user_site_key, realf_site_key set to null'); user_site_key := NULL; realf_site_key := NULL; user_db_unique_name := NULL; deb(DEB_EXIT); END clrSiteName; ------------------------------ getSiteKey ------------------------------------ FUNCTION getSiteKey(db_unique_name IN VARCHAR2) RETURN NUMBER IS resynced NUMBER; ret_site_key NUMBER; BEGIN -- If from_db_unique_name is provided, validate that it is a known -- db_unique_name for the database. IF (this_db_key IS NULL) THEN raise_application_error(-20021, 'database not set'); END IF; -- check if site is known to the recovery catalog. SELECT count(*) INTO resynced FROM node WHERE node.db_unique_name = upper(getSiteKey.db_unique_name) AND node.db_key = this_db_key; IF resynced = 0 THEN raise_application_error(-20243, 'site unknown to recovery catalog: ' || getSiteKey.db_unique_name); END IF; SELECT site_key into ret_site_key FROM node WHERE node.db_unique_name = upper(getSiteKey.db_unique_name) AND node.db_key = this_db_key; RETURN ret_site_key; END getSiteKey; ------------------------------ getSiteName ----------------------------------- FUNCTION getSiteName(site_key IN NUMBER) RETURN VARCHAR2 IS ldb_unique_name node.db_unique_name%TYPE; BEGIN deb(DEB_ENTER, 'getSiteName, site_key=' || site_key); SELECT db_unique_name INTO ldb_unique_name FROM node WHERE site_key = getSiteName.site_key; deb(DEB_EXIT, ' with ' || ldb_unique_name); RETURN ldb_unique_name; END getSiteName; ------------------------------ setSiteName ----------------------------------- PROCEDURE setSiteName(db_unique_name IN VARCHAR2, for_realfiles IN NUMBER) IS BEGIN deb(DEB_ENTER, 'setSiteName '||db_unique_name); IF db_unique_name IS NOT NULL THEN IF for_realfiles != 0 THEN realf_site_key := getSiteKey(db_unique_name); ELSE user_site_key := getSiteKey(db_unique_name); user_db_unique_name := upper(db_unique_name); END IF; deb(DEB_PRINT, 'user_site_key='|| user_site_key); deb(DEB_PRINT, 'realf_site_key=' || realf_site_key); deb(DEB_PRINT, 'user_db_unique_name='||user_db_unique_name); END IF; deb(DEB_EXIT); END setSiteName; ------------------------------ setArchiveFileScopeAttributes ----------------- -- set Archive log file sharing scope attributes for the session PROCEDURE setArchiveFileScopeAttributes(logs_shared IN NUMBER) IS BEGIN deb(DEB_ENTER, 'setArchiveFileScopeAttributes'); IF logs_shared > 0 THEN dbms_rcvman.logs_shared := TRUE#; ELSE dbms_rcvman.logs_shared := FALSE#; END IF; deb(DEB_PRINT, 'logs_shared = ' || dbms_rcvman.logs_shared); deb(DEB_EXIT); END
setArchiveFileScopeAttributes; ------------------------------ setBackupFileScopeAttributes ------------------ -- set Backup file sharing scope attributes for the session PROCEDURE setBackupFileScopeAttributes( disk_backups_shared IN NUMBER, tape_backups_shared IN NUMBER) IS lsite_key NUMBER; BEGIN deb(DEB_ENTER, 'setBackupFileScopeAttributes'); IF disk_backups_shared IS NOT NULL THEN IF disk_backups_shared > 0 THEN dbms_rcvman.disk_backups_shared := TRUE#; ELSE dbms_rcvman.disk_backups_shared := FALSE#; END IF; END IF; IF tape_backups_shared IS NOT NULL THEN IF tape_backups_shared > 0 THEN dbms_rcvman.tape_backups_shared := TRUE#; ELSE dbms_rcvman.tape_backups_shared := FALSE#; END IF; END IF; deb(DEB_PRINT, 'disk_backups_shared='||dbms_rcvman.disk_backups_shared); deb(DEB_PRINT, 'tape_backups_shared='||dbms_rcvman.tape_backups_shared); deb(DEB_EXIT); END setBackupFileScopeAttributes; ------------------------------- addBackupToMKL -------------------------------- PROCEDURE addBackupToMKL(lbMkTab IN OUT NOCOPY rcvRecTabII_t ,rcvRec IN rcvRec_t) IS nelem number; -- number of elements in each bucket key number; -- key to be added to table bucket number; i binary_integer; rcvRecI rcvRecTabI_t; BEGIN IF (rcvRec.type_con = backupSet_con_t) THEN key := rcvRec.bsKey_con; ELSIF (rcvRec.type_con = imageCopy_con_t OR rcvRec.type_con = proxyCopy_con_t) THEN key := rcvRec.key_con; ELSE raise_application_error(-20999, 'internal error: addBackupToMKL' || ' type=' || rcvRec.type_con); END IF; bucket := mod(key, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN i := CONST2GVAL - bucket; ELSE i := bucket; END IF; IF (NOT lbMkTab.exists(i)) THEN lbMkTab(i) := rcvRecI; END IF; -- skip if this is a duplicate in a must keep list nelem := lbMkTab(i).count; IF (nelem > 0) THEN FOR j IN 0..nelem-1 LOOP IF (rcvRec.type_con = lbMkTab(i)(j).type_con) THEN -- for backupset, just key or set_stamp, set_count match is enough IF (rcvRec.type_con = backupSet_con_t AND (lbMkTab(i)(j).bsKey_con = rcvRec.bsKey_con OR (lbMkTab(i)(j).setStamp_con = rcvRec.setStamp_con AND lbMkTab(i)(j).setCount_con = rcvRec.setCount_con))) THEN RETURN; -- for image and proxy copies, recid, stamp and key must all match ELSIF ((rcvRec.type_con = imageCopy_con_t OR rcvRec.type_con = proxyCopy_con_t) AND lbMkTab(i)(j).recid_con = rcvRec.recid_con AND lbMkTab(i)(j).stamp_con = rcvRec.stamp_con AND lbMkTab(i)(j).key_con = rcvRec.key_con) THEN RETURN; END IF; END IF; END LOOP; END IF; lbMkTab(i)(nelem) := rcvRec; END addBackupToMKL; ------------------------------- listBackupInMKL ------------------------------- -- Is backup in must keep list? FUNCTION listBackupInMKL(lbMkTab IN rcvRecTabII_t ,lbRec IN lbRec_t) RETURN BOOLEAN IS nelem number; -- number of elements in each bucket key number; -- key in question
bucket number; i binary_integer; BEGIN IF (lbRec.backup_type = backupset_txt) THEN key := lbRec.bs_key; ELSIF (lbRec.backup_type = copy_txt OR lbRec.backup_type = proxycopy_txt) THEN key := lbRec.pkey; ELSE raise_application_error(-20999, 'internal error: listBackupInMKL' || ' type=' || lbRec.backup_type); END IF; bucket := mod(key, CONST4GVAL); IF (bucket >= CONST2GVAL) THEN i := CONST2GVAL - bucket; ELSE i := bucket; END IF; IF (NOT lbMkTab.exists(i)) THEN RETURN FALSE; END IF; nelem := lbMkTab(i).count; FOR j IN 0..nelem-1 LOOP -- for backupset, just key or set_stamp, set_count match is enough IF (lbMkTab(i)(j).type_con = backupSet_con_t AND lbRec.backup_type = backupset_txt AND (lbMkTab(i)(j).bsKey_con = lbRec.bs_key OR (lbMkTab(i)(j).setStamp_con = lbRec.bs_stamp AND lbMkTab(i)(j).setCount_con = lbRec.bs_count))) THEN RETURN TRUE; ELSIF (((lbMkTab(i)(j).type_con = imageCopy_con_t AND lbRec.backup_type = copy_txt) OR (lbMkTab(i)(j).type_con = proxyCopy_con_t AND lbRec.backup_type = proxycopy_txt)) AND lbMkTab(i)(j).recid_con = lbRec.recid AND lbMkTab(i)(j).stamp_con = lbRec.stamp AND lbMkTab(i)(j).key_con = lbRec.pkey) THEN RETURN TRUE; END IF; END LOOP; RETURN FALSE; END listBackupInMKL; -- Set package variables so that the next archivelog translation returns all -- logs from the last archivelog backup until the standby-became-primary SCN, -- ignoring the needstby flag PROCEDURE SetGetSinceLastBackedAL(ntimes IN number DEFAULT 1, devtype IN varchar2 DEFAULT NULL, sbpscn IN number) IS last_alrec sinceLastBackedAL_c%ROWTYPE; BEGIN deb(DEB_ENTER, 'SetGetSinceLastBackedAl'); -- initialize lbacked_al_next_scn := 0; standby_became_primary_scn := 0; IF client_site_aware = TRUE# OR sbpscn IS NULL OR sbpscn = 0 THEN deb(DEB_IN, 'SetGetSinceLastBackedAl: lbacked_al_next_scn is 0'); ELSE -- during failover the first_change# of a log generated during the failover -- operation starts with the standby-became-primary SCN + 1, hence increment -- the value here to return the log generated by terminal recovery in -- the archivelog translation. standby_became_primary_scn := sbpscn + 1; OPEN sinceLastBackedAL_c(devtype, ntimes); FETCH sinceLastBackedAL_c into last_alrec; IF NOT sinceLastBackedAL_c%NOTFOUND THEN lbacked_al_next_scn := nvl(last_alrec.next_scn, last_alrec.low_scn); END IF; CLOSE sinceLastBackedAL_c; END IF; deb(DEB_IN, 'SetGetSinceLastBackedAl: al_next_scn=' || lbacked_al_next_scn || ' sbpscn=' || standby_became_primary_scn); deb(DEB_EXIT, 'SetGetSinceLastBackedAl'); END SetGetSinceLastBackedAL; -- Return the number of tablespaces that have encryption configuration turned on FUNCTION getEncryptTSCount RETURN BINARY_INTEGER IS encrypt_ts_count NUMBER; BEGIN SELECT count(*) into encrypt_ts_count FROM rc_tablespace WHERE dbinc_key=this_dbinc_key AND encrypt_in_backup = 'ON'; RETURN encrypt_ts_count; END getEncryptTSCount; ----------------------------- getArchivedNextSCN ----------------------------- -- Note that when computing the next archived SCN, we look at records from all -- database sites in a DG environment, unlike filtering backups based on -- file accessibility attributes. It is done so as to get the correct next -- SCN, looking at all information in the catalog.
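-- Illustrative sketch (not part of the package; values assumed): the
-- incarnation walk in the query below pairs each incarnation with its
-- child's resetlogs SCN. For a history I1(reset_scn 1) -> I2(reset_scn 100)
-- -> I3(reset_scn 500, current), the inline view d2 yields the rows
-- (I3, 500, NULL), (I2, 100, 500), (I1, 1, 100), plus the
-- (this_dbinc_key, NULL, NULL) row from dual. A log row is then counted
-- only when its low_scn falls inside its incarnation's
-- [reset_scn, next_reset_scn) window, so logs written on orphaned branches
-- do not inflate the computed next SCN.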
FUNCTION getArchivedNextSCN RETURN NUMBER IS mySCN number; BEGIN deb(DEB_ENTER, 'getArchivedNextSCN'); SELECT nvl(max(al.next_scn),0) INTO mySCN FROM al, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE al.archived = 'Y' AND al.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (al.low_scn >= d2.reset_scn AND al.low_scn < d2.next_reset_scn)); SELECT greatest(nvl(max(brl.next_scn), 0), mySCN) INTO mySCN FROM brl, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE brl.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (brl.low_scn >= d2.reset_scn AND brl.low_scn < d2.next_reset_scn)); SELECT greatest(nvl(max(xal.next_scn), 0), mySCN) INTO mySCN FROM xal, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE xal.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (xal.low_scn >= d2.reset_scn AND xal.low_scn < d2.next_reset_scn)); deb(DEB_EXIT, 'with '||to_char(mySCN)); RETURN mySCN; END getArchivedNextSCN; --------------------------- isArchivedLogMissing ---------------------------- FUNCTION isArchivedLogMissing(fromSCN IN NUMBER, untilSCN IN NUMBER) RETURN number IS thread number; sequence number; dbinc_key number; BEGIN deb(DEB_ENTER, 'isArchivedLogMissing'); deb(DEB_IN, 'fromSCN =' || nvl(to_char(fromSCN), 'NULL') || ' untilSCN=' || nvl(to_char(untilSCN), 'NULL')); -- The following query checks whether the archived logs needed for recovery -- form an unbroken sequence. If there is a gap, it returns the first gap.
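-- Illustrative example (not part of the package): for thread 1 with logged
-- sequences 1, 2, 3, 5 the LEAD analytic below computes the pairs
-- (seq 1, nextseq 2), (seq 2, nextseq 3), (seq 3, nextseq 5) and, for the
-- last row, (seq 5, nextseq 6) via the sequence#+1 default. Only the
-- (seq 3, nextseq 5) row survives the "nextseq NOT IN (sequence#,
-- sequence#+1)" filter, so sequence 4 is reported as the first missing log.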
SELECT thread#, sequence#, dbinc_key INTO thread, sequence, dbinc_key FROM (SELECT dbinc_key, thread#, sequence#, lead(sequence#, 1, sequence#+1) OVER (PARTITION BY thread#, dbinc_key ORDER BY sequence#) nextseq FROM (SELECT al.thread#, al.sequence#, al.dbinc_key FROM al, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE al.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (al.low_scn >= d2.reset_scn AND al.low_scn < d2.next_reset_scn)) AND low_scn >= fromSCN AND (untilSCN IS NULL OR low_scn < untilSCN) UNION ALL SELECT brl.thread#, brl.sequence#, brl.dbinc_key FROM brl, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE brl.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (brl.low_scn >= d2.reset_scn AND brl.low_scn < d2.next_reset_scn)) AND low_scn >= fromSCN AND (untilSCN IS NULL OR low_scn < untilSCN) UNION ALL SELECT xal.thread#, xal.sequence#, xal.dbinc_key FROM xal, (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key UNION ALL SELECT this_dbinc_key dbinc_key, null reset_scn, null next_reset_scn FROM dual) d2 WHERE xal.dbinc_key = d2.dbinc_key AND (d2.next_reset_scn IS NULL OR (xal.low_scn >= d2.reset_scn AND xal.low_scn < d2.next_reset_scn)) AND low_scn >= fromSCN AND (untilSCN IS NULL OR low_scn < untilSCN)) ) WHERE nextseq NOT IN (sequence#, sequence#+1) AND rownum = 1; deb(DEB_IN, 'missing sequence is (dbinc_key, thread, sequence)=('|| to_char(dbinc_key) || ',' || to_char(thread) || ',' || to_char(sequence+1) || ')'); deb(DEB_EXIT, 'with TRUE'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with FALSE'); RETURN FALSE#; END isArchivedLogMissing; ----------------------------- getIncarnationKey ----------------------------- FUNCTION getIncarnationKey(untilSCN IN NUMBER) RETURN number IS myInc number; BEGIN deb(DEB_ENTER, 'getIncarnationKey'); IF (untilSCN <= this_reset_scn) THEN SELECT dbinc.dbinc_key INTO myInc FROM (SELECT dbinc_key, reset_scn, PRIOR reset_scn next_reset_scn FROM dbinc START WITH dbinc_key = this_dbinc_key CONNECT BY PRIOR parent_dbinc_key = dbinc_key) dbinc WHERE dbinc.reset_scn < untilSCN AND dbinc.next_reset_scn >= untilSCN; ELSE myInc := 0; END IF; deb(DEB_EXIT, 'with incKey=' || to_char(myInc)); RETURN myInc; END getIncarnationKey; ------------------------------------ getMaxScn -------------------------------- FUNCTION getMaxScn RETURN number IS logmaxnt date; BEGIN return getMaxScn(logmaxnt); END; FUNCTION getMaxScn(logmaxnt OUT date) return number IS minmaxnc number; minmaxnt date; BEGIN IF (this_dbinc_key is NULL) THEN deb(DEB_EXIT, 'with error 20020'); raise_application_error(-20020, 'Database incarnation not set'); END IF; /* Bug 2377581: Find SCN from enabled threads only. 
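   For example (illustrative only): if enabled thread 1 has archived redo
   up to next_scn 1000 but enabled thread 2 only up to next_scn 900, the
   inner query produces the per-thread maxima (1000, 900) and the outer
   min(maxnc) returns 900, the highest SCN up to which redo from every
   enabled thread is available.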
*/ select min(maxnc), min(maxnt) into minmaxnc, minmaxnt from (select max(next_scn) maxnc, max(NEXT_TIME) maxnt from (select a.next_scn, a.next_time, a.thread# tno from al a, rt t where a.THREAD# = t.THREAD# and a.ARCHIVED = 'Y' and t.status not in ('D', 'I') and t.dbinc_key = this_dbinc_key and a.dbinc_key = this_dbinc_key union select b.next_scn, b.next_time, b.thread# tno from brl b, rt t where b.THREAD# = t.THREAD# and t.status not in ('D', 'I') and t.dbinc_key = this_dbinc_key and b.dbinc_key = this_dbinc_key) group by tno); logmaxnt := minmaxnt; return minmaxnc; END getMaxScn; ----------------------------- getActualDbinc ------------------------------- FUNCTION getActualDbinc RETURN NUMBER IS BEGIN return actual_dbinc_key; END getActualDbinc; -- -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -- -- The code between BEGIN_PUBCOMMON_RCVMAN_CODE and END_PUBCOMMON_RCVMAN_CODE -- is also included in the target database version of the RCVMAN package (that -- is prvtrmns.pls). The processing is done by the Makefile in sqlexec/fixedpkg. -- -- !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -- -- -- BEGIN_PUBCOMMON_RCVMAN_CODE --------------- -- Debugging -- --------------- ---------------------------------- dumpState ---------------------------------- FUNCTION dumpState( lineno IN number) RETURN varchar2 IS BEGIN IF lineno = 1 THEN RETURN 'this_db_key='||this_db_key; ELSIF lineno = 2 THEN RETURN 'this_dbinc_key='||this_dbinc_key; ELSIF lineno = 3 THEN RETURN 'this_reset_scn='||this_reset_scn; ELSIF lineno = 4 THEN RETURN 'this_reset_time='||this_reset_time; ELSIF lineno = 5 THEN RETURN 'untilSCN='||untilSCN; ELSIF lineno = 6 THEN RETURN 'untilTime='||untilTime; ELSIF lineno = 7 THEN RETURN 'getRA_completedAfter='||getRA_completedAfter; ELSIF lineno = 8 THEN RETURN 'getRA_completedBefore='||getRA_completedBefore; ELSIF lineno = 9 THEN RETURN 'getRA_likePattern='||getRA_likePattern; ELSIF lineno = 10 THEN RETURN 'getRA_containerMask='||getRA_containerMask; ELSIF lineno = 11 THEN RETURN 'getRA_actionMask='||getRA_actionMask; ELSIF lineno = 12 THEN RETURN 'computeRA_allRecords='||computeRA_allRecords; ELSIF lineno = 13 THEN RETURN 'computeRA_fullBackups='|| nvl(to_char(computeRA_fullBackups), 'NULL'); ELSIF lineno = 14 THEN RETURN 'allIncarnations='||allIncarnations; ELSE RETURN NULL; END IF; END dumpState; -------------------------------- dumpPkgState --------------------------------- PROCEDURE dumpPkgState(msg in varchar2 default null) IS line varchar2(132); lno number := 1; BEGIN deb(DEB_ENTER, 'dumpPkgState ' || nvl(msg, ' ')); loop line := dumpState(lno); if line is NULL then exit; else deb(DEB_PRINT, line); lno := lno + 1; end if; end loop; deb(DEB_EXIT); END dumpPkgState; ---------------------------------- setDebugOn --------------------------------- PROCEDURE setDebugOn IS BEGIN if debug is null or not debug then -- -- Passing buffer_size as null is an undocumented way to buffer an -- unlimited number of rows. PL/SQL storage is the limit -- dbms_output.enable(buffer_size => null); debug := TRUE; else deb(DEB_PRINT, 'Debug on - debug already enabled'); end if; END setDebugOn; --------------------------------- setDebugOff --------------------------------- PROCEDURE setDebugOff IS BEGIN dbms_output.disable; -- free memory debug := FALSE; END setDebugOff; ---------------------------- -- Package Initialization -- ---------------------------- -- This is a vestigial function that was released to customers in 8.1.3 Beta.
-- It is no longer called, and is no longer needed, but must still be here -- because this version of the package may be called by an 8.1.3 rman -- executable. ---------------------------------- initialize --------------------------------- PROCEDURE initialize( rman_vsn IN number) IS BEGIN NULL; END initialize; ---------------------------- set_package_constants ---------------------------- PROCEDURE set_package_constants IS BEGIN -- This procedure exists only for backwards compatibility with RMAN 8.1.5. -- This package no longer cares what the values are. The rddf cursor -- now implements the order-by for preference using hard-coded values -- that are not returned to the client. NULL; END set_package_constants; ----------------------- -- Utility functions -- ----------------------- ---------------------------------- stamp2date --------------------------------- FUNCTION stamp2date( stamp IN number) RETURN date IS x number; dt varchar2(19); BEGIN x := stamp; dt := to_char(mod(x,60), 'FM09'); -- seconds x := floor(x/60); dt := to_char(mod(x,60), 'FM09') || ':' || dt; -- minutes x := floor(x/60); dt := to_char(mod(x,24), 'FM09') || ':' || dt; -- hours x := floor(x/24); dt := to_char(mod(x,31)+1, 'FM09') || ' ' || dt; -- days x := floor(x/31); dt := to_char(mod(x,12)+1, 'FM09') || '/' || dt; -- months dt := to_char(floor(x/12)+1988) || '/' || dt; RETURN to_date(dt, 'YYYY/MM/DD HH24:MI:SS'); END stamp2date; ------------------- -- Query Filters -- ------------------- -- Obsolete as of 8.1.6 ---------------------------------- setAllFlag --------------------------------- PROCEDURE setAllFlag( flag IN boolean) IS BEGIN setAllIncarnations(flag); IF (flag) THEN ignoreCreationSCN := TRUE#; ELSE ignoreCreationSCN := FALSE#; END IF; END setAllFlag; --------------------------------- getUntilTime -------------------------------- FUNCTION getUntilTime RETURN date IS BEGIN RETURN untilTime; END getUntilTime; --------------------------------- getUntilScn --------------------------------- FUNCTION getUntilScn RETURN number IS BEGIN RETURN untilScn; END getUntilScn; ---------------------------------- resetUntil --------------------------------- PROCEDURE resetUntil IS BEGIN untilSCN := NULL; untilTime := NULL; rpoint_set := FALSE; END resetUntil; ----------------------------------- setFrom ----------------------------------- PROCEDURE setFrom( restorefrom IN number DEFAULT NULL) IS BEGIN IF (restorefrom = BACKUP) THEN restoreSource := backupSet_con_t + proxyCopy_con_t; ELSIF (restorefrom = COPY) THEN restoreSource := imageCopy_con_t; ELSIF (restorefrom = NONPROXY) THEN restoreSource := imageCopy_con_t + backupSet_con_t; ELSIF (restorefrom is NULL) THEN restoreSource := NULL; ELSE raise_application_error(-20200, 'Invalid restore source'); END IF; END setFrom; -------------------------------- setDeviceType -------------------------------- PROCEDURE setDeviceType( type IN varchar2) IS BEGIN IF (deviceCount >= 8) THEN raise_application_error(-20280, 'Too many device types'); END IF; deviceCount := deviceCount + 1; deviceList(deviceCount) := type; IF (type = 'DISK') THEN diskDevice := TRUE; END IF; END setDeviceType; ----------------------------------- setStandby ------------------------------ PROCEDURE setStandby( stby IN boolean) IS BEGIN if stby is NULL then onlyStandby := NULL; elsif stby then onlyStandby := TRUE#; else onlyStandby := FALSE#; end if; END setStandby; ------------------------------- setDeviceTypeAny ------------------------------ PROCEDURE setDeviceTypeAny IS BEGIN diskDevice := TRUE; anyDevice := 
TRUE#; deviceCount := 0; END setDeviceTypeAny; ------------------------------- resetDeviceType ------------------------------- PROCEDURE resetDeviceType IS BEGIN FOR i in 1..8 LOOP deviceList(i) := NULL; END LOOP; deviceCount := 0; diskDevice := FALSE; anyDevice := FALSE#; END resetDeviceType; ------------------------------------ setTag ----------------------------------- PROCEDURE setTag( tag IN varchar2 DEFAULT NULL) IS BEGIN restoreTag := tag; END setTag; ---------------------------- setRecoveryDestFile ------------------------------ PROCEDURE setRecoveryDestFile(onlyrdf IN BOOLEAN) IS BEGIN if onlyrdf then recoveryDestFile := TRUE; else recoveryDestFile := FALSE; end if; END setRecoveryDestFile; --------------------------- -- Backup Set Validation -- --------------------------- ------------------------------ getValidBackupSet ------------------------------ FUNCTION getValidBackupSet( validBackupSetRec OUT NOCOPY validBackupSetRec_t ,checkDeviceIsAllocated IN number DEFAULT FALSE#) RETURN number IS lastCode number; checkRc number; local validBackupSetRec_t; BEGIN IF (bsRecCacheEnabled) THEN RETURN cacheGetValidBackupSet( validBackupSetRec => validBackupSetRec, checkDeviceIsAllocated => checkDeviceIsAllocated); END IF; deb(DEB_ENTER, 'getValidBackupSet'); -- If the cursor is not open, just return FALSE#. IF (getValidBackupSetCursor = 'findValidBackupSet1P_c') THEN IF (NOT findValidBackupSet1P_c%ISOPEN) THEN RETURN FALSE#; END IF; ELSIF (getValidBackupSetCursor = 'findValidBackupSet_c') THEN IF (NOT findValidBackupSet_c%ISOPEN) THEN RETURN FALSE#; END IF; ELSE raise_application_error(-20204, 'Translation not started'); END IF; <<nextRow>> IF (getValidBackupSetCursor = 'findValidBackupSet1P_c') THEN FETCH findValidBackupSet1P_c INTO local; IF (findValidBackupSet1P_c%NOTFOUND) THEN CLOSE findValidBackupSet1P_c; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END IF; ELSIF (getValidBackupSetCursor = 'findValidBackupSet_c') THEN FETCH findValidBackupSet_c INTO local; IF (findValidBackupSet_c%NOTFOUND) THEN CLOSE findValidBackupSet_c; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END IF; END IF; lastCode := getValidBackupSetLast.code; -- save for test below getValidBackupSetLast := local; -- save for next time here IF (local.code <= lastCode) THEN -- The findValidBackupSet cursor returns 4 columns: deviceType, tag, -- copy#, and a code (1,2,3). If the code is 1, it means we found a -- complete set of pieces with the same deviceType, tag, and copy#. We -- always want to process records of type 1. If the code is 2, it means -- we ignored copy#, and found a complete set of pieces with the same -- deviceType and tag. We only want to process records of type 2 if -- we did not see this deviceType/tag combination with code 1. -- A code 3 record means we ignored copy# and tag, and found a complete -- set of pieces with the same deviceType. We only want to process -- records of code 3 if we did not see any records of type 1 or 2 -- for this device type. -- The order by allows us to implement this very easily. We return a -- record only if its code is <= the code for the last record we -- fetched. This works because if there is a code 2 record, then it -- will be followed by a code 3 record. If there is a code 1 record, it -- will be followed by a code 2 and then code 3 record. -- Since this record's code is <= the previous record's code, we -- must be looking at a different deviceType, tag, and/or copy# than -- before, or this is the first record. In either case, we want to -- process the record.
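-- For example (illustrative only): if the cursor returns, in order,
-- (DISK, TAG1, copy# 1, code 1), (DISK, TAG1, null, code 2) and
-- (DISK, null, null, code 3), the code-1 row is returned on the first
-- fetch, the code-2 row is skipped because 2 > 1, and the code-3 row is
-- skipped because 3 > 2. If the first complete set only exists at the
-- tag level, the first fetch is the code-2 row, which is returned, and
-- only the trailing code-3 row is skipped.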
IF (checkDeviceIsAllocated = TRUE#) THEN IF (anyDevice = FALSE# AND isDeviceTypeAllocated(local.deviceType) = FALSE#) THEN deb(DEB_IN, 'device type not allocated: ' || local.deviceType); GOTO nextRow; END IF; END IF; validBackupSetRec := local; -- set OUT mode arg deb(DEB_IN, 'returning valid rec deviceType=' || local.deviceType || ' tag=' || local.tag || ' copyNumber=' || to_char(local.copyNumber)); deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; ELSE deb(DEB_IN, ' local.code=' || to_char(local.code) || ' lastCode=' || to_char(lastCode)); GOTO nextRow; END IF; deb(DEB_EXIT); END getValidBackupSet; --------------------- -- Get an rcvRec_t -- --------------------- ---------------------------------- getRcvRec ---------------------------------- FUNCTION getRcvRec( funCode IN number ,rcvRec OUT NOCOPY rcvRec_t ,callAgain OUT number) RETURN number IS rc number; BEGIN deb(DEB_ENTER, 'getRcvRec'); rc := 0; -- init for procedures callAgain := TRUE#; deb(DEB_IN, ' funCode=' || to_char(funCode)); IF (funCode = getCfCopy) THEN getControlfileCopy(rcvRec); ELSIF (funCode = getDfCopy) THEN getDatafileCopy(rcvRec); ELSIF (funCode = getAnyProxy) THEN getProxyCopy(rcvRec); ELSIF (funCode = getCfBackup) THEN rc := getControlfileBackup(rcvRec); IF (rc != SUCCESS OR NOT findControlfileBackupCursor) THEN callAgain := FALSE#; END IF; ELSIF (funCode = getSfBackup) THEN rc := getSpfileBackup(rcvRec => rcvRec); IF (rc != SUCCESS OR NOT findSpfileBackupCursor) THEN callAgain := FALSE#; END IF; ELSIF (funCode = listCfCopy) THEN listGetControlfileCopy(rcvRec); ELSIF (funCode = listDfCopy) THEN listGetDatafileCopy(rcvRec); ELSIF (funCode = listCfBackup) THEN listGetControlfileBackup(rcvRec); ELSIF (funCode = listSfBackup) THEN listGetSpfileBackup(rcvRec); ELSIF (funCode = listDfBackup) THEN listGetDatafileBackup(rcvRec); ELSIF (funCode = listAlBackup) THEN listGetArchivedLogBackup(rcvRec); ELSIF (funCode = listDfProxy) THEN listGetProxyDatafile(rcvRec); ELSIF (funCode = listAlProxy) THEN listGetProxyArchivedLog(rcvRec); ELSIF (funCode = getRecovAction) THEN callAgain := getRecoveryAction(rcvRec); ELSIF (funCode = getAlBackup) THEN rc := getArchivedLogBackup(rcvRec); IF (rc != SUCCESS) THEN callAgain := FALSE#; END IF; ELSIF (funCode = getRangeAlBackup) THEN rc := getRangeArchivedLogBackup(rcvRec); IF (rc = UNAVAILABLE) THEN callAgain := FALSE#; END IF; ELSIF (funCode = listAlCopy) THEN listGetArchivedLogCopy(rcvRec); ELSIF (funCode = listBSet) THEN listGetBackupsetFiles(rcvRec); ELSIF (funCode = getAllBSet) THEN getAllBackupSet(rcvRec); ELSE deb(DEB_EXIT, 'with error 20999'); raise_application_error(-20999, 'getRcvRec: unknown funCode: ' || to_char(funCode)); END IF; IF (debug) THEN printRcvRec(rcvRec); deb(DEB_EXIT, 'with rc:'||TO_CHAR(rc)); END IF; -- If a backup set is returned, we could write a query here to find if -- any pieces are accessible. But since RMAN client never gets backup sets -- when no pieces are accessible, no need to make any changes here. 
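-- Hypothetical caller sketch (illustrative only; the local names myRec,
-- again and rc are assumed, not part of this package): an RMAN-side fetch
-- loop would look like
--
--   LOOP
--     rc := getRcvRec(funCode => getDfCopy, rcvRec => myRec,
--                     callAgain => again);
--     EXIT WHEN again = FALSE#;
--   END LOOP;
--   -- no_data_found raised by the underlying fetch signals end-of-fetch
--
-- i.e. the caller keeps calling until callAgain is cleared or the
-- end-of-fetch exception is raised.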
RETURN rc; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END getRcvRec; -------------------------- -- Datafile Translation -- -------------------------- ------------------------------ translateDatabase ------------------------------ PROCEDURE translateDatabase( sinceUntilSCN IN number DEFAULT NULL) IS fromSCN number; toSCN number; BEGIN deb(DEB_ENTER, 'translateDatabase'); validateState(getDatafileCursor); IF (untilSCN is NULL) THEN -- This range means: all datafiles that exist now fromSCN := MAXSCNVAL; toSCN := MAXSCNVAL; ELSE -- an until clause is in effect fromSCN := untilSCN; IF (sinceUntilSCN = TRUE#) THEN -- This flag means the caller wants datafiles that existed -- since the until SCN. So the translation range is: -- untilSCN...MAXSCNVAL toSCN := MAXSCNVAL; ELSE -- This is the normal case when an until clause is in effect. -- We want the datafiles that existed at the until SCN, but not -- any that were created after that. toSCN := fromSCN; END IF; END IF; deb(DEB_OPEN, 'translateDatabase_c'); OPEN translateDatabase_c(fromSCN, toSCN); getDatafileCursor := 'translateDatabase'; getDatafileNoRows.error := NULL; -- error not possible skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet setDBTransClause; deb(DEB_EXIT); END translateDatabase; -- Public procedure to add a tablespace to the skip list. -------------------------------- skipTableSpace ------------------------------- PROCEDURE skipTableSpace( ts_name IN varchar2) IS BEGIN skipTablespaceCount := skipTablespaceCount + 1; skipTablespaceList(skipTablespaceCount) := ts_name; END skipTableSpace; ----------------------------- translateTablespace ----------------------------- PROCEDURE translateTablespace( ts_name IN varchar2) IS BEGIN deb(DEB_ENTER, 'translateTablespace'); validateState(getDatafileCursor); deb(DEB_OPEN, 'translateTablespace_c'); OPEN translateTablespace_c(tsName => ts_name); getDatafileCursor := 'translateTablespace'; getDatafileNoRows.error := -20202; getDatafileNoRows.msg := 'Tablespace does not exist'; skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet deb(DEB_EXIT); END translateTablespace; ------------------------------ translateDataFile ------------------------------ PROCEDURE translateDataFile( fname IN varchar2) IS BEGIN deb(DEB_ENTER, 'translateDataFile_1'); validateState(getDatafileCursor); deb(DEB_OPEN, 'translateDatafileName'); OPEN translateDatafileName(fileName => fname); IF (untilSCN is NULL and untilTime is NULL) THEN getDatafileNoRows.error := -20201; getDatafileNoRows.msg := 'Datafile does not exist'; ELSE getDatafileNoRows.error := -20222; getDatafileNoRows.msg := 'Datafile name does not exist or is ambiguous'; END IF; getDatafileCursor := 'translateDatafileName'; skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet deb(DEB_EXIT); END translateDatafile; ------------------------------ translateDataFile ------------------------------ PROCEDURE translateDataFile( fno IN number) IS BEGIN deb(DEB_ENTER, 'translateDataFile_2'); validateState(getDatafileCursor); deb(DEB_OPEN, 'translateDatafileNumber'); OPEN translateDatafileNumber(fno => fno); getDatafileCursor := 'translateDatafileNumber'; getDatafileNoRows.error := -20201; getDatafileNoRows.msg := 'Datafile does not exist'; skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet setDfTransClause(fno => fno); deb(DEB_EXIT); END translateDatafile; ------------------------------ translateDataFile 
------------------------------ PROCEDURE translateDataFile( fno IN number ,ckpscn IN number) IS BEGIN deb(DEB_ENTER, 'translateDataFile_3'); validateState(getDatafileCursor); deb(DEB_OPEN, 'translateDatafileCheckpoint'); OPEN translateDatafileCheckpoint(fno => fno, ckpSCN => ckpscn); getDatafileCursor := 'translateDatafileCheckpoint'; getDatafileNoRows.error := -20201; getDatafileNoRows.msg := 'Datafile does not exist'; skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet setDfTransClause(fno => fno); deb(DEB_EXIT); END translateDatafile; ----------------------------- translateAllDataFile ---------------------------- PROCEDURE translateAllDatafile IS BEGIN deb(DEB_ENTER, 'translateAllDataFile'); IF (translateAllDatafile_c%ISOPEN) THEN CLOSE translateAllDatafile_c; END IF; deb(DEB_OPEN, 'translateAllDatafile_c'); OPEN translateAllDatafile_c; getDatafileCursor := 'translateAllDatafile'; getDatafileNoRows.error := NULL; -- error not possible skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet setDBTransClause; deb(DEB_EXIT); END; ----------------------------- translateCorruptList ----------------------------- PROCEDURE translateCorruptList IS BEGIN validateState(getDatafileCursor); OPEN translateCorruptList_c; getDatafileCursor := 'translateCorruptList'; getDatafileNoRows.error := -20504; getDatafileNoRows.msg := 'Corruption List does not exist'; skipTablespaceCount := 0; getDatafileLast.dfNumber := NULL; -- no last row yet END translateCorruptList; -- Main getDatafile routine --------------------------------- getDatafile --------------------------------- PROCEDURE getDatafile( dfRec OUT NOCOPY dfRec_t ,oldClient IN boolean DEFAULT FALSE) IS getDatafileRowcount number; local dfRec_t; BEGIN deb(DEB_ENTER, 'getDataFile_1'); <<nextRow>> IF (getDatafileCursor = 'translateDatabase') THEN FETCH translateDatabase_c INTO local; IF (translateDatabase_c%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateDatabase_c%ROWCOUNT; CLOSE translateDatabase_c; END IF; ELSIF (getDatafileCursor = 'translateAllDatafile') THEN FETCH translateAllDatafile_c INTO local; IF (translateAllDatafile_c%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateAllDatafile_c%ROWCOUNT; CLOSE translateAllDatafile_c; END IF; ELSIF (getDatafileCursor = 'translateTablespace') THEN FETCH translateTablespace_c INTO local; IF (translateTablespace_c%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateTablespace_c%ROWCOUNT; CLOSE translateTablespace_c; END IF; ELSIF (getDatafileCursor = 'translateDatafileName') THEN FETCH translateDatafileName INTO local; IF (translateDatafileName%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateDatafileName%ROWCOUNT; CLOSE translateDatafileName; END IF; IF (oldClient) THEN -- We won't be called again, so close the cursor now. IF (translateDatafileName%ISOPEN) THEN CLOSE translateDatafileName; END IF; getDatafileCursor := NULL; END IF; ELSIF (getDatafileCursor = 'translateDatafileNumber') THEN FETCH translateDatafileNumber INTO local; IF (translateDatafileNumber%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateDatafileNumber%ROWCOUNT; CLOSE translateDatafileNumber; END IF; IF (oldClient) THEN -- We won't be called again, so close the cursor now.
IF (translateDatafileNumber%ISOPEN) THEN CLOSE translateDatafileNumber; END IF; getDatafileCursor := NULL; END IF; ELSIF (getDatafileCursor = 'translateDatafileCheckpoint') THEN FETCH translateDatafileCheckpoint INTO local; IF (translateDatafileCheckpoint%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateDatafileCheckpoint%ROWCOUNT; CLOSE translateDatafileCheckpoint; END IF; IF (oldClient) THEN -- We won't be called again, so close the cursor now. IF (translateDatafileCheckpoint%ISOPEN) THEN CLOSE translateDatafileCheckpoint; END IF; getDatafileCursor := NULL; END IF; ELSIF (getDatafileCursor = 'translateCorruptList') THEN FETCH translateCorruptList_c INTO local; IF (translateCorruptList_c%NOTFOUND) THEN -- Save rowcount before closing cursor getDatafileRowcount := translateCorruptList_c%ROWCOUNT; CLOSE translateCorruptList_c; END IF; ELSE deb(DEB_EXIT, 'with error 20204'); raise_application_error(-20204, 'Translation not started'); END IF; IF (getDatafileRowcount IS NOT NULL) THEN -- if %NOTFOUND getDatafileCursor := NULL; -- we closed it above IF (getDatafileRowcount = 0 AND getDatafileNoRows.error IS NOT NULL) THEN -- Signal the appropriate error. deb(DEB_EXIT, 'with norows error'); raise_application_error(getDatafileNoRows.error, getDatafileNoRows.msg); ELSE deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; -- signal end-of-fetch END IF; END IF; IF (skipTablespace(local.tsName)) THEN GOTO nextRow; END IF; -- This is needed in case that cursor is translateAllDatafile_c. IF (getDatafileLast.dfNumber = local.dfNumber) THEN IF (getDatafileLast.pluginSCN != 0) THEN IF (getDatafileLast.pluginSCN = local.pluginSCN) THEN deb(DEB_PRINT, 'not returning ' || local.fileName); GOTO nextRow; END IF; ELSIF (getDatafileLast.dfCreationSCN = local.dfCreationSCN) THEN deb(DEB_PRINT, 'not returning ' || local.fileName); GOTO nextRow; END IF; END IF; getDatafileLast := local; setDfTransClause(fno => local.dfNumber); dfRec := local; -- set OUT mode arg deb(DEB_EXIT); END getDatafile; ---------------------------- -- Online Log Translation -- ---------------------------- ----------------------------- translateOnlineLogs ----------------------------- PROCEDURE translateOnlineLogs(srls IN number DEFAULT 0) IS BEGIN deb(DEB_ENTER, 'translateOnlineLogs'); IF (translateOnlineLogs_c%ISOPEN) THEN validateState('translateOnlineLogs_c'); -- raise the error END IF; deb(DEB_OPEN, 'translateOnlineLogs_c, srls='||srls); OPEN translateOnlineLogs_c(srls); deb(DEB_EXIT); END translateOnlineLogs; --------------------------------- getOnlineLog -------------------------------- PROCEDURE getOnlineLog( fname OUT varchar2 ,thread# OUT number ,group# OUT number) IS BEGIN deb(DEB_ENTER, 'getOnlineLog'); FETCH translateOnlineLogs_c INTO thread#, group#, fname; IF (translateOnlineLogs_c%NOTFOUND) THEN CLOSE translateOnlineLogs_c; deb(DEB_EXIT, 'with NULL (no online log found)'||fname); fname := NULL; -- indicate end-of-fetch RETURN; END IF; deb(DEB_EXIT, 'with online log:'||fname); END getOnlineLog; ------------------------------ -- Archived Log Translation -- ------------------------------ -------------------------------- getArchivedLog ------------------------------- PROCEDURE getArchivedLog( alRec OUT NOCOPY alRec_t, closeCursor IN boolean DEFAULT FALSE) IS getArchivedLogRowcount number; local alRec_t; BEGIN deb(DEB_ENTER, 'getArchivedLog'); <<nextRow>> IF (getArchivedLogCursor = 'translateArcLogKey') THEN FETCH translateArcLogKey INTO local; IF (translateArcLogKey%NOTFOUND) THEN
getArchivedLogRowcount := translateArcLogKey%ROWCOUNT; CLOSE translateArcLogKey; END IF; IF (closeCursor AND translateArcLogKey%ISOPEN) THEN CLOSE translateArcLogKey; END IF; ELSIF (getArchivedLogCursor = 'translateArcLogName') THEN FETCH translateArcLogName INTO local; IF (translateArcLogName%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogName%ROWCOUNT; CLOSE translateArcLogName; END IF; IF (closeCursor AND translateArcLogName%ISOPEN) THEN CLOSE translateArcLogName; END IF; ELSIF (getArchivedLogCursor = 'translateArcLogSeqRange') THEN FETCH translateArcLogSeqRange INTO local; IF (translateArcLogSeqRange%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogSeqRange%ROWCOUNT; CLOSE translateArcLogSeqRange; END IF; IF (closeCursor AND translateArcLogSeqRange%ISOPEN) THEN CLOSE translateArcLogSeqRange; END IF; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogSeqRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogSeqRange2') THEN FETCH translateArcLogSeqRange2 INTO local; IF (translateArcLogSeqRange2%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogSeqRange2%ROWCOUNT; CLOSE translateArcLogSeqRange2; END IF; IF (closeCursor AND translateArcLogSeqRange2%ISOPEN) THEN CLOSE translateArcLogSeqRange2; END IF; ELSIF (getArchivedLogCursor = 'translateArcLogTimeRange') THEN FETCH translateArcLogTimeRange INTO local; IF (translateArcLogTimeRange%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogTimeRange%ROWCOUNT; CLOSE translateArcLogTimeRange; END IF; IF (closeCursor AND translateArcLogTimeRange%ISOPEN) THEN CLOSE translateArcLogTimeRange; END IF; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogTimeRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogTimeRange2') THEN FETCH translateArcLogTimeRange2 INTO local; IF (translateArcLogTimeRange2%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogTimeRange2%ROWCOUNT; CLOSE translateArcLogTimeRange2; END IF; IF (closeCursor AND translateArcLogTimeRange2%ISOPEN) THEN CLOSE translateArcLogTimeRange2; END IF; ELSIF (getArchivedLogCursor = 'translateArcLogSCNRange') THEN FETCH translateArcLogSCNRange INTO local; IF (translateArcLogSCNRange%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogSCNRange%ROWCOUNT; CLOSE translateArcLogSCNRange; END IF; IF (closeCursor AND translateArcLogSCNRange%ISOPEN) THEN CLOSE translateArcLogSCNRange; END IF; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogSCNRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogSCNRange2') THEN FETCH translateArcLogSCNRange2 INTO local; IF (translateArcLogSCNRange2%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogSCNRange2%ROWCOUNT; CLOSE translateArcLogSCNRange2; END IF; IF (closeCursor AND translateArcLogSCNRange2%ISOPEN) THEN CLOSE translateArcLogSCNRange2; END IF; ELSIF (getArchivedLogCursor = 'translateArcLogPattern') THEN FETCH translateArcLogPattern INTO local; IF (translateArcLogPattern%NOTFOUND) THEN getArchivedLogRowcount := translateArcLogPattern%ROWCOUNT; CLOSE translateArcLogPattern; END IF; IF (closeCursor AND translateArcLogPattern%ISOPEN) THEN CLOSE translateArcLogPattern; END IF; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogPattern') THEN -- BEGIN_CAT_RCVMAN_ONLY 
raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSE deb(DEB_EXIT, 'with error 20204'); raise_application_error(-20204, 'Translation not started'); END IF; IF (closeCursor) THEN getArchivedLogCursor := NULL; END IF; IF (getArchivedLogRowcount IS NOT NULL) THEN getArchivedLogCursor := NULL; -- we closed it above getArchivedLogDoingRecovery := FALSE#; -- clear for next time getArchivedLogOnlyrdf := 0; -- clear for next time currInc := -1; deb(DEB_PRINT, 'getArchivedLogDoingRecovery cleared'); -- if the log count returned from the cursor is zero or we did not return -- a single row to the client, signal the norows error... IF ((getArchivedLogRowcount = 0 OR getArchivedLogLast.thread is NULL) AND getArchivedLogNoRows.error IS NOT NULL) THEN -- Signal the appropriate error. getArchivedLogLast := NULL; -- clear for next time deb(DEB_EXIT, 'with norows error'); raise_application_error(getArchivedLogNoRows.error, getArchivedLogNoRows.msg); ELSE deb(DEB_EXIT, 'with no more records'); getArchivedLogLast := NULL; -- clear for next time RAISE no_data_found; -- signal end-of-fetch END IF; END IF; deb(DEB_PRINT, 'getArchivedLog - resetscn='||local.rlgSCN|| ' thread='||local.thread|| ' seq='||local.sequence|| ' lowscn='||local.lowSCN|| ' nextscn='||local.nextSCN|| ' terminal='||local.terminal|| ' site_key_order_col='||local.site_key_order_col|| ' isrdf='||local.isrdf|| ' stamp='||local.stamp); -- Skip orphan logs when the translation is for recovery purposes. IF (getArchivedLogDoingRecovery = TRUE#) THEN -- If possible, use the previous log to identify incarnation info. IF (getArchivedLogLast.rlgSCN = local.rlgSCN AND getArchivedLogLast.rlgTime = local.rlgTime AND currInc <> -1) THEN deb(DEB_PRINT, 'getArchivedLog - currInc =' || currInc); -- Skip the orphan logs on ancestor branch IF (currInc > 0 AND local.lowSCN >= inc_list(currInc-1).resetlogs_change#-1) THEN deb(DEB_PRINT, 'getArchivedLog - Skip log - belongs to orphan branch'); GOTO nextRow; END IF; ELSE -- Need to determine the incarnation that wrote this log currInc := -1; FOR inc_idx in 0..max_inc_idx-1 LOOP -- If the start recovery scn is greater than the incarnation resetlogs scn -- and the log belongs to an earlier incarnation, we don't need it. IF tc_fromSCN > inc_list(inc_idx).resetlogs_change# AND local.rlgSCN < inc_list(inc_idx).resetlogs_change# THEN deb(DEB_PRINT, 'getArchivedLog - Skip log precedes recovery SCN - '||inc_idx); EXIT; END IF; -- Log can be either on an orphan branch of an applicable incarnation -- or actually be an applicable log. If a log is on an orphaned -- branch, it will be skipped at the end of the loop -- since currInc won't get set.
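-- Illustrative example (values assumed): with a parent incarnation that
-- reset at SCN 100 and a current incarnation that branched from it at
-- SCN 500, a parent-branch log with lowSCN 600 is orphaned - it was
-- generated after the point where the current incarnation split off -
-- so it never matches an applicable incarnation here and is skipped.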
IF (local.rlgSCN = inc_list(inc_idx).resetlogs_change# AND local.rlgTime = inc_list(inc_idx).resetlogs_time) THEN -- A previous incarnation's archived log can be used only if -- its lowscn is less than the child incarnation's resetlogs IF inc_idx > 0 THEN IF local.lowSCN < inc_list(inc_idx-1).resetlogs_change# - 1 THEN currInc := inc_idx; deb(DEB_PRINT, 'getArchivedLog - currInc2 set to '||currInc); END IF; ELSE currInc := inc_idx; deb(DEB_PRINT, 'getArchivedLog - currInc3 set to '||currInc); END IF; EXIT; END IF; END LOOP; IF (currInc = -1) THEN deb(DEB_PRINT,'getArchivedLog - Skip log - not required by recovery'); GOTO nextRow; END IF; END IF; END IF; -- Eliminate duplicate archived logs unless getArchivedLogDuplicates is true IF (local.thread = getArchivedLogLast.thread AND local.sequence = getArchivedLogLast.sequence AND local.terminal = getArchivedLogLast.terminal AND local.lowSCN = getArchivedLogLast.lowSCN AND local.rlgSCN = getArchivedLogLast.rlgSCN AND local.rlgTime = getArchivedLogLast.rlgTime) THEN local.duplicate := TRUE#; END IF; IF (getArchivedLogDuplicates = FALSE# AND -- if don't want duplicates local.duplicate = TRUE#) THEN deb(DEB_PRINT, 'getArchivedLog - dont want duplicates'); GOTO nextRow; END IF; -- If the caller wants only flash recovery area logs, return also their -- duplicates, so that the failover features can kick in if there is a problem -- using the log in the recovery area. For example, during backup of logs from -- the flash recovery area, if we find a corrupt block then the server will -- fail over to another log outside the FRA. Likewise, if a log from the FRA cannot -- be validated successfully and there exists a similar log outside the FRA, the -- RMAN client could use the other log instead of aborting the backup. Also, -- note that the log records are ordered such that the logs in the Flash -- Recovery Area come before the other non-FRA logs. Thus the following -- condition eliminates logs that exist only outside the FRA, but still returns -- them if they are duplicates of a log in the FRA. IF (getArchivedLogOnlyrdf = 1 AND local.duplicate = FALSE# AND local.isrdf = 'NO') THEN deb(DEB_PRINT, 'getArchivedLog - dont want non-recovery area log '); GOTO nextRow; END IF; -- For catalog case... -- BEGIN_CAT_RCVMAN_ONLY -- Filter duplicate Names IF IsDuplicateAlName(local.duplicate, local.filename) THEN GOTO nextRow; END IF; -- If we ever get a terminal recovery log for this sequence -- it is returned, instead of another log that is not marked terminal EOR. IF (local.thread = getArchivedLogLast.thread AND local.sequence = getArchivedLogLast.sequence AND local.lowSCN = getArchivedLogLast.lowSCN AND local.rlgSCN = getArchivedLogLast.rlgSCN AND local.rlgTime = getArchivedLogLast.rlgTime AND getArchivedLogLast.terminal = 'YES' AND local.terminal = 'NO') THEN deb(DEB_PRINT, 'getArchivedLog - Skip log - not an EOR log'); GOTO nextRow; END IF; -- END_CAT_RCVMAN_ONLY -- If doing recovery, filter bad sequence log. IF (getArchivedLogDoingRecovery = TRUE#) THEN IF (local.thread = getArchivedLogLast.thread AND local.lowSCN <> getArchivedLogLast.lowSCN AND local.sequence = getArchivedLogLast.sequence) THEN deb(DEB_PRINT, 'getArchivedLog - Skip log - filter bad sequence log'); GOTO nextRow; END IF; END IF; -- If stamp is LE 0, then this came from the brl (or al) table. -- Change stamp to be null since a negative number is not a legal stamp value.
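-- (Illustrative note: a row derived from the brl or al table can surface
-- with a zero or negative stamp; mapping it to NULL below keeps callers
-- from treating such a value as a genuine stamp.)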
IF (local.stamp <= 0) THEN local.stamp := NULL; END IF; IF getArchivedLogCursor IS NULL THEN getArchivedLogLast := NULL; -- clear for next time getArchivedLogDoingRecovery := FALSE#; -- clear for next time getArchivedLogOnlyrdf := 0; currInc := -1; deb(DEB_PRINT, 'getArchivedLogDoingRecovery cleared'); deb(DEB_PRINT, 'getArchivedLogLast := NULL'); ELSE getArchivedLogLast := local; deb(DEB_PRINT,'getArchivedLogLast('||getArchivedLogCursor||') := local'); deb(DEB_PRINT, 'getArchivedLogLast := local'); END IF; alRec := local; -- set OUT mode arg deb(DEB_EXIT); END getArchivedLog; --------------------------- translateArchivedLogKey --------------------------- PROCEDURE translateArchivedLogKey( alKey IN number ,needstby IN number DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'translateArchivedLogKey'); validateState(getArchivedLogCursor); deb(DEB_OPEN, 'translateArcLogKey'); OPEN translateArcLogKey(alKey => alKey); getArchivedLogCursor := 'translateArcLogKey'; getArchivedLogDuplicates := NULL; getArchivedLogNoRows.error := -20240; getArchivedLogNoRows.msg := 'Archived log does not exist'; deb(DEB_EXIT); END translateArchivedLogKey; --------------------------- translateArchivedLogKey --------------------------- PROCEDURE translateArchivedLogKey( al_key IN number ,available IN number DEFAULT 1 -- ignored (for compatibility) ,unavailable IN number DEFAULT 1 -- ignored (for compatibility) ,deleted IN number DEFAULT 1 -- ignored (for compatibility) ,online IN number DEFAULT 1 -- ignored (for compatibility) ,recid OUT number ,stamp OUT number ,thread# OUT number ,sequence# OUT number ,low_scn OUT number ,reset_scn OUT number ,block_size OUT number ,fname OUT varchar2 ,needstby IN number DEFAULT NULL) IS alRec alRec_t; BEGIN deb(DEB_ENTER, 'translateArchivedLogKey816'); translateArchivedLogKey(alKey => al_key, needstby => needstby); getArchivedLog(alRec => alRec, closeCursor => TRUE); recid := alRec.recid; stamp := alRec.stamp; thread# := alRec.thread; sequence# := alRec.sequence; low_scn := alRec.lowSCN; reset_scn := alRec.rlgSCN; block_size := alRec.blockSize; fname := alRec.fileName; deb(DEB_EXIT); END translateArchivedLogKey; -- Translate archived log name. The archived logs are ordered by -- stamp in order to return the most recently created log first, because it is -- most likely to exist. Note that archived logs that belong to any -- incarnation of the current database are returned. --------------------------- translateArchivedLogName -------------------------- PROCEDURE translateArchivedLogName( fname IN varchar2 ,available IN number DEFAULT NULL -- for compatibility ,unavailable IN number DEFAULT NULL -- for compatibility ,deleted IN number DEFAULT NULL -- for compatibility ,online IN number -- ignored ,duplicates IN number ,statusMask IN binary_integer DEFAULT NULL -- for compatibility ,needstby IN number DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'translateArchivedLogName'); validateState(getArchivedLogCursor); deb(DEB_OPEN, 'translateArcLogName'); OPEN translateArcLogName(fname => fname, online => online, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, deleted, 0)), needstby => needstby); getArchivedLogCursor := 'translateArcLogName'; getArchivedLogDuplicates := duplicates; getArchivedLogNoRows.error := -20240; getArchivedLogNoRows.msg := 'Archived log does not exist'; deb(DEB_EXIT); END translateArchivedLogName; -- Translate an archived log sequence range.
The archived logs are ordered -- by thread#, first_change#, sequence# to make detection of "missing" -- archived logs in the range easy and to return the logs in the order that -- recovery will apply them. Note that sequence number is reset if the -- controlfile is recreated so first change must be before sequence# number -- in the order by clause. By default, this procedure returns logs for -- current incarnation, however, using the incarn parameter, any incarnation's -- logs may be provided. -- The incarn parameter is interpreted as: -- -1 -- current incarnation -- 0 -- any incarnation -- other-- a specific incarnation number -- NULL -- defaults to -1 ------------------------- translateArchivedLogSeqRange ------------------------ PROCEDURE translateArchivedLogSeqRange( thread# IN number ,fromseq# IN number ,toseq# IN number ,pattern IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,online IN number -- ignored ,duplicates IN number ,statusMask IN binary_integer DEFAULT NULL -- for compatability ,needstby IN number DEFAULT NULL -- for compatability ,foreignal IN binary_integer DEFAULT 0 -- for compatability ,incarn IN number DEFAULT NULL) -- for compatibility IS mask number := NVL(statusMask, computeAvailableMask(available, unavailable, deleted, 0)); BEGIN deb(DEB_ENTER, 'translateArchivedLogSeqRange'); validateState(getArchivedLogCursor); IF (thread# is NULL) THEN deb(DEB_EXIT, 'with error 20210'); raise_application_error(-20210, 'Thread# is missing'); END IF; setAlTransClause(thread => thread#, fromSeq => fromseq#, toSeq => toseq#, pattern => pattern); IF (foreignal != 0) THEN IF (tc_dbid.count = 0) THEN tc_anydbid := TRUE#; END IF; deb(DEB_OPEN, 'translateFrgnArcLogSeqRange'); -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSE IF (bitand(mask,BSdeleted) != 0 AND pattern IS NULL) THEN -- Use the cursor that also looks at backup redo logs. -- Available and unavailable are ignored. Since we are looking -- at backup redo logs, pattern cannot be implemented here, so we -- use this cursor only if pattern is null. 
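--
-- Illustrative sketch (not part of this package): a caller that wants
-- thread 1, sequences 100-110, including deleted records but with no name
-- pattern, would take the translateArcLogSeqRange2 branch below. The
-- argument values are hypothetical; FALSE#, BSavailable and BSdeleted are
-- this package's own constants, assumed visible to the caller.
--
--    dbms_rcvman.translateArchivedLogSeqRange(
--       thread#    => 1,
--       fromseq#   => 100,
--       toseq#     => 110,
--       pattern    => NULL,
--       online     => 0,
--       duplicates => 0,
--       statusMask => dbms_rcvman.BSavailable + dbms_rcvman.BSdeleted);
--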
deb(DEB_OPEN, 'translateArcLogSeqRange2'); OPEN translateArcLogSeqRange2(thread# => thread#, incarn => NVL(incarn,-1), fromseq# => fromseq#, toseq# => toseq#, statusMask => mask, online => online, needstby => needstby); getArchivedLogCursor := 'translateArcLogSeqRange2'; ELSE deb(DEB_OPEN, 'translateArcLogSeqRange'); OPEN translateArcLogSeqRange(thread# => thread#, incarn => NVL(incarn,-1), fromseq# => fromseq#, toseq# => toseq#, pattern => pattern, statusMask => mask, online => online, needstby => needstby); getArchivedLogCursor := 'translateArcLogSeqRange'; END IF; getArchivedLogNoRows.error := -20242; getArchivedLogNoRows.msg := 'No archived logs in the range specified'; END IF; getArchivedLogDuplicates := duplicates; deb(DEB_EXIT); END translateArchivedLogSeqRange; -- Translate an archived log time range, note it can return parent incarnation -- logs -- The incarn parameter is interpreted as: -- -1 -- current incarnation -- 0 -- any incarnation -- other-- a specific incarnation number -- NULL -- defaults to 0 ------------------------ translateArchivedLogTimeRange ------------------------ PROCEDURE translateArchivedLogTimeRange( thread# IN number ,fromTime IN date ,toTime IN date ,pattern IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,online IN number -- ignored ,duplicates IN number ,statusMask IN binary_integer DEFAULT NULL -- for compatability ,needstby IN number DEFAULT NULL -- for compatability ,foreignal IN binary_integer DEFAULT 0 -- for compatability ,incarn IN number DEFAULT NULL) -- for compatibility IS mask number := NVL(statusMask, computeAvailableMask(available, unavailable, deleted, 0)); BEGIN deb(DEB_ENTER, 'translateArchivedLogTimeRange'); validateState(getArchivedLogCursor); setAlTransClause(thread => thread#, fromTime => fromTime, toTime => toTime, pattern => pattern); IF (foreignal != 0) THEN IF (tc_dbid.count = 0) THEN tc_anydbid := TRUE#; END IF; deb(DEB_OPEN, 'translateFrgnArcLogTimeRange'); -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSE IF (bitand(mask,BSdeleted) != 0 AND pattern IS NULL) THEN -- Use the cursor that also looks at backup redo logs. -- Available and unavailable are ignored. Since we are looking -- at backup redo logs, pattern cannot be implemented here, so we -- use this cursor only if pattern is null. 
deb(DEB_OPEN, 'translateArcLogTimeRange2'); OPEN translateArcLogTimeRange2(thread# => thread#, incarn => NVL(incarn,0), fromTime => fromTime, toTime => toTime, statusMask => mask, online => online, needstby => needstby); getArchivedLogCursor := 'translateArcLogTimeRange2'; ELSE deb(DEB_OPEN, 'translateArcLogTimeRange'); OPEN translateArcLogTimeRange(thread# => thread#, incarn => NVL(incarn,0), fromTime => fromTime, toTime => toTime, pattern => pattern, statusMask => mask, online => online, needstby => needstby); getArchivedLogCursor := 'translateArcLogTimeRange'; END IF; getArchivedLogNoRows.error := -20242; getArchivedLogNoRows.msg := 'No archived logs in the range specified'; END IF; getArchivedLogDuplicates := duplicates; deb(DEB_EXIT); END translateArchivedLogTimeRange; -- Translate an archived log scn range, note it can return parent incarnation -- logs -- The incarn parameter is interpreted as: -- -1 -- current incarnation -- 0 -- any incarnation -- other-- a specific incarnation number -- NULL -- defaults to 0 ------------------------- translateArchivedLogSCNRange ------------------------ PROCEDURE translateArchivedLogSCNRange( thread# IN number ,fromSCN IN number ,toSCN IN number ,pattern IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,online IN number ,duplicates IN number ,statusMask IN binary_integer DEFAULT NULL -- for compatability ,needstby IN number DEFAULT NULL ,doingRecovery IN number DEFAULT FALSE# ,onlyrdf IN binary_integer DEFAULT 0 ,reset_scn IN number DEFAULT NULL -- for compatibility ,reset_time IN date DEFAULT NULL -- for compatibility ,sequence# IN number DEFAULT NULL -- for compatibility ,foreignal IN binary_integer DEFAULT 0 -- for compatability ,incarn IN number DEFAULT NULL) -- for compatibility IS adjusted_toSCN number; mask number := NVL(statusMask, computeAvailableMask(available, unavailable, deleted, 0)); BEGIN deb(DEB_ENTER, 'translateArchivedLogSCNRange'); validateState(getArchivedLogCursor); IF (untilTime IS NULL) THEN -- Adjust the toSCN to take the until SCN into account. The untilSCN -- was either specified directly by the user, or it was computed from -- the until logseq. In either case, it is the exact SCN at which -- recovery will stop. adjusted_toSCN := least(toSCN, nvl(untilSCN, toSCN)); ELSE -- Leave the toSCN alone. We cannot adjust the toSCN using -- the untilSCN here because the until SCN was estimated by -- computeUntilSCN(), and this procedure deliberately estimates the -- SCN low. We cannot tolerate a low estimated SCN here because -- then we might filter out a log that will be needed by recovery. -- So instead, we pass the untilTime to the cursor. adjusted_toSCN := toSCN; END IF; -- 8.1.6 and later always make sure that the toSCN is greater than the -- fromSCN, but 8.1.5 and earlier will call us with fromSCN==toSCN. -- The query will fail if they are equal, so make the toSCN greater than -- the fromSCN. It is also possible that adjusting by the untilSCN above -- made the fromSCN==toSCN. 
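--
-- Worked example of the adjustment above (values hypothetical): with
-- toSCN = 5000 and untilSCN = 4500, and untilTime NULL, adjusted_toSCN =
-- least(5000, nvl(4500, 5000)) = 4500, so logs beyond the until point are
-- excluded by the cursor. With untilTime set, adjusted_toSCN remains 5000
-- and the untilTime is passed to the cursor instead, because the estimated
-- untilSCN is deliberately low and must not filter out needed logs.
--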
IF (adjusted_toSCN <= fromSCN) THEN adjusted_toSCN := fromSCN+1; END IF; setAlTransClause(thread => thread#, fromSCN => fromSCN, toSCN => adjusted_toSCN, pattern => pattern); IF (foreignal != 0) THEN IF (tc_dbid.count = 0) THEN tc_anydbid := TRUE#; END IF; deb(DEB_OPEN, 'translateFrgnArcLogSCNRange'); -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSE IF (bitand(mask,BSdeleted) != 0 AND pattern IS NULL) THEN -- Use the cursor that also looks at backup redo logs. -- Available and unavailable are ignored. Since we are looking -- at backup redo logs, pattern cannot be implemented here, so we -- use this cursor only if pattern is null. deb(DEB_OPEN, 'translateArcLogSCNRange2'); OPEN translateArcLogSCNRange2(thread# => thread#, incarn => NVL(incarn,0), sequence# => sequence#, fromSCN => fromSCN, toSCN => adjusted_toSCN, toTime => untilTime, statusMask => mask, online => online, needstby => needstby, reset_scn => reset_scn, reset_time => reset_time); getArchivedLogCursor := 'translateArcLogSCNRange2'; deb(DEB_IN, ' using cursor 2 fromSCN=' || to_char(fromSCN) || ' toSCN=' || to_char(adjusted_toSCN)); ELSE deb(DEB_OPEN, 'translateArcLogSCNRange'); OPEN translateArcLogSCNRange(thread# => thread#, incarn => NVL(incarn,0), sequence# => sequence#, fromSCN => fromSCN, toSCN => adjusted_toSCN, pattern => pattern, statusMask => mask, online => online, needstby => needstby, reset_scn => reset_scn, reset_time => reset_time); getArchivedLogCursor := 'translateArcLogSCNRange'; END IF; getArchivedLogNoRows.error := -20242; getArchivedLogNoRows.msg := 'No archived logs in the range specified'; END IF; getArchivedLogDuplicates := duplicates; getArchivedLogDoingRecovery := DoingRecovery; IF (DoingRecovery = TRUE#) THEN deb(DEB_PRINT, 'getArchivedLogDoingRecovery set to TRUE'); END IF; getArchivedLogOnlyrdf := onlyrdf; deb(DEB_EXIT); END translateArchivedLogSCNRange; -- Translate an archived log name pattern, note it can return parent -- incarnation logs ------------------------- translateArchivedLogPattern ------------------------- PROCEDURE translateArchivedLogPattern( pattern IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,online IN number ,duplicates IN number ,statusMask IN binary_integer DEFAULT NULL -- for compatability ,needstby IN number DEFAULT NULL -- for compatability ,foreignal IN binary_integer DEFAULT 0) -- for compatability IS mask number := NVL(statusMask, computeAvailableMask(available, unavailable, deleted, 0)); BEGIN deb(DEB_ENTER, 'translateArchivedLogPattern'); IF (bitand(mask,BSdeleted) != 0 AND pattern IS NULL) THEN -- Use the cursor that also looks at union of brl and al tables. -- This must be 'archivelog all'. From 8.1.6+, this condition -- is checked in krmk.pc (krmkaltr()). Here, to maintain compatibility -- with lower version of RMAN (8.1.5-) exec, we move the check here. 
-- This removes us from doing 'compatible' check translateArchivedLogSCNRange( thread# => NULL, fromscn => 0, toscn => 281474976710655+1, pattern => NULL, statusMask => mask, online => online, duplicates => duplicates, foreignal => foreignal); ELSE validateState(getArchivedLogCursor); setAlTransClause(pattern => pattern); IF (foreignal != 0) THEN IF (tc_dbid.count = 0) THEN tc_anydbid := TRUE#; END IF; deb(DEB_OPEN, 'translateFrgnArcLogPattern'); -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSE deb(DEB_OPEN, 'translateArcLogPattern'); OPEN translateArcLogPattern(pattern => pattern, statusMask => mask, online => online); getArchivedLogCursor := 'translateArcLogPattern'; getArchivedLogNoRows.error := -20242; getArchivedLogNoRows.msg := 'No archived logs in the range specified'; END IF; getArchivedLogDuplicates := duplicates; END IF; deb(DEB_EXIT); END translateArchivedLogPattern; -------------------------- translateArchivedLogCancel ------------------------- PROCEDURE translateArchivedLogCancel IS BEGIN deb(DEB_ENTER, 'translateArchivedLogCancel'); IF (getArchivedLogCursor = 'translateArcLogKey') THEN CLOSE translateArcLogKey; ELSIF (getArchivedLogCursor = 'translateArcLogName') THEN CLOSE translateArcLogName; ELSIF (getArchivedLogCursor = 'translateArcLogSeqRange') THEN CLOSE translateArcLogSeqRange; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogSeqRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogSeqRange2') THEN CLOSE translateArcLogSeqRange2; ELSIF (getArchivedLogCursor = 'translateArcLogTimeRange') THEN CLOSE translateArcLogTimeRange; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogTimeRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogTimeRange2') THEN CLOSE translateArcLogTimeRange2; ELSIF (getArchivedLogCursor = 'translateArcLogSCNRange') THEN CLOSE translateArcLogSCNRange; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogSCNRange') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY ELSIF (getArchivedLogCursor = 'translateArcLogSCNRange2') THEN CLOSE translateArcLogSCNRange2; ELSIF (getArchivedLogCursor = 'translateArcLogPattern') THEN CLOSE translateArcLogPattern; ELSIF (getArchivedLogCursor = 'translateFrgnArcLogPattern') THEN -- BEGIN_CAT_RCVMAN_ONLY raise_application_error(-20999, 'Not supported in recovery catalog'); -- END_CAT_RCVMAN_ONLY END IF; getArchivedLogCursor := NULL; -- we closed it above getArchivedLogLast := NULL; -- clear for next time getArchivedLogDoingRecovery := FALSE#; -- clear for next time resetAlTransClause; deb(DEB_EXIT); END translateArchivedLogCancel; -- This public procedure for 8.1.5 and previous releases. Recid will be -- null to indicate end-of-fetch. That was a bad interface design, but -- we are stuck with it now. 
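--
-- Usage sketch (illustrative only; the real caller is the RMAN client, so
-- this anonymous block is an assumption made for clarity): the archived-log
-- routines form a translate-then-fetch protocol, with end-of-fetch
-- signalled by no_data_found.
--
--    DECLARE
--       alRec dbms_rcvman.alRec_t;
--    BEGIN
--       dbms_rcvman.translateArchivedLogPattern(pattern    => '%arc%',
--                                               online     => 0,
--                                               duplicates => 0);
--       LOOP
--          dbms_rcvman.getArchivedLog(alRec => alRec);   -- one row per call
--       END LOOP;
--    EXCEPTION
--       WHEN no_data_found THEN
--          NULL;   -- end-of-fetch; cursor state already cleaned up
--    END;
--
-- translateArchivedLogCancel (above) is the explicit way to abandon a
-- translation before reaching end-of-fetch.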
-------------------------------- getArchivedLog -------------------------------

PROCEDURE getArchivedLog(
   recid      OUT number
  ,stamp      OUT number
  ,thread#    OUT number
  ,sequence#  OUT number
  ,low_scn    OUT number
  ,nxt_scn    OUT number
  ,fname      OUT varchar2
  ,reset_scn  OUT number
  ,block_size OUT number
  ,blocks     OUT number) IS
   alRec alRec_t;
BEGIN
   deb(DEB_ENTER, 'getArchivedLog');
   getArchivedLog(alRec);
   -- Bug 1186598: From 8.1.5- Rman EXEC does union of brl and al tables,
   -- when deleted=TRUE.
   -- Getting a null recid shouldn't affect us.
   -- Return OUT mode args
   recid      := nvl(alRec.recid, 0);   -- no null indicator before 8.1.6
   stamp      := nvl(alRec.stamp, 0);   -- no null indicator before 8.1.6
   thread#    := alRec.thread;
   sequence#  := alRec.sequence;
   low_scn    := alRec.lowSCN;
   nxt_scn    := alRec.nextSCN;
   fname      := nvl(alRec.fileName, 'null');   -- no null indicator
   reset_scn  := alRec.rlgSCN;
   block_size := alRec.blockSize;
   blocks     := alRec.blocks;
   deb(DEB_EXIT);
EXCEPTION
   WHEN no_data_found THEN
      recid := NULL;   -- indicate end-of-fetch
      stamp := NULL;
      deb(DEB_EXIT, 'with no more records');
END getArchivedLog;

---------------------------------
-- Controlfilecopy Translation --
---------------------------------

-- Translate a controlfilecopy name.

------------------------- translateControlFileCopyName ------------------------

PROCEDURE translateControlFileCopyName(
   fname       IN varchar2
  ,available   IN number DEFAULT NULL           -- for compatibility
  ,unavailable IN number DEFAULT NULL           -- for compatibility
  ,duplicates  IN number
  ,statusMask  IN binary_integer DEFAULT NULL   -- for compatibility
  ,onlyone     IN number DEFAULT 1) IS
BEGIN
   deb(DEB_ENTER, 'translateControlFileCopyName');
   -- Replaces ccf_name
   getControlFileCopyCursor := 'findControlfileBackup_c';
   deb(DEB_OPEN, 'findControlfileBackup_c');
   OPEN findControlfileBackup_c(sourcemask => imageCopy_con_t,
        pattern => fname,
        currentIncarnation => FALSE#,
        statusMask => NVL(statusMask,
                          computeAvailableMask(available,unavailable,0,0)));
   -- Initialize singleRow variable
   IF (duplicates = FALSE# AND onlyone IS NOT NULL) THEN
      getControlFileCopySingleRow := TRUE;
   ELSE
      getControlFileCopySingleRow := FALSE;
   END IF;
   deb(DEB_EXIT);
END translateControlFileCopyName;

-- Translate a controlfilecopy tag.

------------------------- translateControlFileCopyTag ------------------------

PROCEDURE translateControlFileCopyTag(
   cftag       IN varchar2
  ,available   IN number DEFAULT NULL           -- for compatibility
  ,unavailable IN number DEFAULT NULL           -- for compatibility
  ,duplicates  IN number
  ,statusMask  IN binary_integer DEFAULT NULL
  ,onlyone     IN number DEFAULT 1) IS
BEGIN
   deb(DEB_ENTER, 'translateControlFileCopyTag');
   -- Replaces ccf_name
   deb(DEB_OPEN, 'findControlfileBackup_c');
   getControlFileCopyCursor := 'findControlfileBackup_c';
   OPEN findControlfileBackup_c(sourcemask => imageCopy_con_t,
        tag => cftag,
        currentIncarnation => FALSE#,
        statusMask => NVL(statusMask,
                          computeAvailableMask(available,unavailable,0,0)));
   -- Initialize singleRow variable
   IF (duplicates = FALSE# AND onlyone IS NOT NULL) THEN
      getControlFileCopySingleRow := TRUE;
   ELSE
      getControlFileCopySingleRow := FALSE;
   END IF;
   deb(DEB_EXIT);
END translateControlFileCopyTag;

-- Translate a controlfilecopy key.
------------------------- translateControlFileCopyKey ------------------------

PROCEDURE translateControlFileCopyKey(
   key         IN number
  ,available   IN number DEFAULT NULL              -- for compatibility
  ,unavailable IN number DEFAULT NULL              -- for compatibility
  ,statusMask  IN binary_integer DEFAULT NULL) IS  -- for compatibility
BEGIN
   deb(DEB_ENTER, 'translateControlFileCopyKey');
   -- Replaces ccf_name
   deb(DEB_OPEN, 'findControlFileCopyKey');
   -- Note that with the rcvcat, we cannot reuse the findDatafileCopyKey
   -- cursor, because the tables being searched are different.
   getControlFileCopyCursor := 'findControlFileCopyKey';
   OPEN findControlFileCopyKey(copyKey => key,
        statusMask => NVL(statusMask,
                          computeAvailableMask(available,unavailable,0,0)));
   deb(DEB_EXIT);
END translateControlFileCopyKey;

------------------------------ getControlFileCopy -----------------------------

PROCEDURE getControlFileCopy(
   rcvRec IN OUT rcvRec_t) IS
   getControlFileCopyRowcount number;
BEGIN
   deb(DEB_ENTER, 'getControlFileCopy');
   IF (getControlFileCopyCursor = 'findControlFileCopyKey' AND
       findControlFileCopyKey%ISOPEN) THEN
      FETCH findControlFileCopyKey INTO rcvRec;
      IF (findControlFileCopyKey%NOTFOUND) THEN
         -- Save rowcount before closing cursor
         getControlFileCopyRowcount := findControlFileCopyKey%ROWCOUNT;
         CLOSE findControlFileCopyKey;
         IF (getControlFileCopyRowcount = 0) THEN
            deb(DEB_EXIT, 'with error 20220');
            raise_application_error(-20220, 'Controlfile copy does not exist');
         ELSE
            deb(DEB_EXIT, 'with no more records');
            RAISE no_data_found;   -- signal end-of-fetch
         END IF;
      END IF;
      IF (getControlFileCopySingleRow = TRUE AND
          findControlFileCopyKey%ROWCOUNT > 1) THEN
         -- We only want a single row, and we've already returned 1 row, so
         -- just treat this as end-of-fetch
         CLOSE findControlFileCopyKey;
         deb(DEB_EXIT, 'with no more records');
         RAISE no_data_found;
      END IF;
   ELSIF (getControlFileCopyCursor = 'findControlfileBackup_c' AND
          findControlfileBackup_c%ISOPEN) THEN
      FETCH findControlfileBackup_c INTO rcvRec;
      IF (findControlfileBackup_c%NOTFOUND) THEN
         -- Save rowcount before closing cursor
         getControlFileCopyRowcount := findControlfileBackup_c%ROWCOUNT;
         CLOSE findControlfileBackup_c;
         IF (getControlFileCopyRowcount = 0) THEN
            deb(DEB_EXIT, 'with error 20220');
            raise_application_error(-20220, 'Controlfile copy does not exist');
         ELSE
            deb(DEB_EXIT, 'with no more records');
            RAISE no_data_found;   -- signal end-of-fetch
         END IF;
      END IF;
      IF (getControlFileCopySingleRow = TRUE AND
          findControlfileBackup_c%ROWCOUNT > 1) THEN
         -- We only want a single row, and we've already returned 1 row, so
         -- just treat this as end-of-fetch
         CLOSE findControlfileBackup_c;
         deb(DEB_EXIT, 'with no more records');
         RAISE no_data_found;
      END IF;
   ELSE
      deb(DEB_EXIT, 'with error 20204');
      raise_application_error(-20204, 'Translation not started (' ||
                              getControlFileCopyCursor || ')');
   END IF;
   deb(DEB_EXIT);
END getControlFileCopy;

-- Obsolete as of 8.1.6

------------------------------ getControlFileCopy -----------------------------

PROCEDURE getControlFileCopy(
   recid      OUT number
  ,stamp      OUT number
  ,reset_scn  OUT number
  ,ckp_scn    OUT number
  ,block_size OUT number) IS
   rcvRec rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'getControlFileCopy');
   getControlFileCopy(rcvRec);
   recid      := rcvRec.recid_con;
   stamp      := rcvRec.stamp_con;
   reset_scn  := rcvRec.rlgSCN_act;
   ckp_scn    := rcvRec.toSCN_act;
   block_size := rcvRec.blockSize_con;
   deb(DEB_EXIT);
EXCEPTION
   WHEN no_data_found THEN
      deb(DEB_EXIT, 'with no more records');   -- this just means end-of-fetch
      recid := NULL;   -- signal end-of-fetch to caller
END getControlFileCopy;

------------------------------
-- Datafilecopy Translation --
------------------------------

------------------------------- getDataFileCopy -------------------------------

PROCEDURE getDataFileCopy(
   rcvRec      OUT NOCOPY rcvRec_t
  ,closeCursor IN boolean DEFAULT FALSE) IS
   getDataFileCopyRowcount number;
   local rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'getDataFileCopy');
<<nextRow>>
   IF (getDatafileCopyCursor = 'findDatafileCopyKey') THEN
      FETCH findDatafileCopyKey INTO local;
      IF (findDatafileCopyKey%NOTFOUND) THEN
         getDataFileCopyRowcount := findDatafileCopyKey%ROWCOUNT;
         CLOSE findDatafileCopyKey;
      END IF;
      IF (closeCursor AND findDatafileCopyKey%ISOPEN) THEN
         CLOSE findDatafileCopyKey;
      END IF;
   ELSIF (getDatafileCopyCursor = 'findDatafileBackup_c') THEN
      IF (getDataFileCopySingleRow = TRUE AND
          findDatafileBackup_c%ROWCOUNT > 1) THEN
         -- We only want a single row, and we've already returned 1 row, so
         -- just treat this as end-of-fetch
         CLOSE findDatafileBackup_c;
         getDatafileCopyCursor := NULL;
         deb(DEB_EXIT, 'with no more records');
         RAISE no_data_found;
      END IF;
      FETCH findDatafileBackup_c INTO local;
      IF (findDatafileBackup_c%NOTFOUND) THEN
         getDataFileCopyRowcount := findDatafileBackup_c%ROWCOUNT;
         CLOSE findDatafileBackup_c;
      END IF;
      IF (closeCursor AND findDatafileBackup_c%ISOPEN) THEN
         CLOSE findDatafileBackup_c;
      END IF;
   ELSE
      deb(DEB_EXIT, 'with error 20204');
      raise_application_error(-20204, 'Translation not started');
   END IF;

   IF (closeCursor) THEN
      getDatafileCopyCursor := NULL;
   END IF;

   IF (getDataFileCopyRowcount IS NOT NULL) THEN
      getDatafileCopyCursor := NULL;
      IF (getDataFileCopyRowcount = 0 AND
          getDatafileCopyNoRows.error IS NOT NULL) THEN
         deb(DEB_EXIT, 'with norows error');
         raise_application_error(getDatafileCopyNoRows.error,
                                 getDatafileCopyNoRows.msg);
      ELSE
         deb(DEB_EXIT, 'with no more records');
         RAISE no_data_found;   -- signal end-of-fetch
      END IF;
   END IF;

   IF (getDataFileCopyLatestOnly = TRUE AND
       getDataFileCopyLast.dfNumber_obj = local.dfNumber_obj) THEN
      GOTO nextRow;
   END IF;

   getDataFileCopyLast := local;   -- save for duplicate filtering
   rcvRec := local;                -- set OUT mode arg
   setDfTransClause(fno => local.dfNumber_obj);
   deb(DEB_EXIT);
END getDataFileCopy;

--------------------------- translateDataFileCopyKey --------------------------

PROCEDURE translateDataFileCopyKey(
   cdf_key     IN number
  ,available   IN number DEFAULT NULL              -- for compatibility
  ,unavailable IN number DEFAULT NULL              -- for compatibility
  ,statusMask  IN binary_integer DEFAULT NULL) IS  -- for compatibility
BEGIN
   deb(DEB_ENTER, 'translateDataFileCopyKey');
   validateState(getDatafileCopyCursor);
   -- Replaces cursor that used to be in translateDataFileCopyKey
   deb(DEB_OPEN, 'findDataFileCopyKey');
   OPEN findDataFileCopyKey(copyKey => cdf_key,
        statusMask => NVL(statusMask,
                          computeAvailableMask(available, unavailable, 0, 0)));
   getDatafileCopyCursor := 'findDatafileCopyKey';
   getDataFileCopyNoRows.error := -20230;
   getDataFileCopyNoRows.msg := 'Datafile copy does not exist';
   getDataFileCopyDuplicates := NULL;
   getDataFileCopySingleRow := NULL;
   deb(DEB_EXIT);
END translateDataFileCopyKey;

-- Obsolete as of 8.1.6

--------------------------- translateDataFileCopyKey --------------------------

PROCEDURE translateDataFileCopyKey(
   cdf_key     IN number
  ,available   IN number
  ,unavailable IN number
  ,recid       OUT number
  ,stamp       OUT number
  ,file#       OUT number
  ,fname       OUT varchar2
  ,reset_scn   OUT number
  ,create_scn  OUT number
  ,ckp_scn     OUT number
  ,block_size  OUT number
  ,blocks      OUT number) IS
   rcvRec rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'translateDataFileCopyKey815');
   translateDataFileCopyKey(cdf_key => cdf_key,
                            available => available,
                            unavailable => unavailable);
   getDataFileCopy(rcvRec => rcvRec, closeCursor => TRUE);
   recid      := rcvRec.recid_con;
   stamp      := rcvRec.stamp_con;
   file#      := rcvRec.dfNumber_obj;
   fname      := rcvRec.fileName_con;
   reset_scn  := rcvRec.rlgSCN_act;
   create_scn := rcvRec.dfCreationSCN_obj;
   ckp_scn    := rcvRec.toSCN_act;
   block_size := rcvRec.blockSize_con;
   blocks     := rcvRec.blocks_con;
   deb(DEB_EXIT);
END translateDataFileCopyKey;

-- Translate a datafilecopy name. The datafile copies are ordered by
-- stamp in order to return the most recently created copy first, because it
-- is the most likely to exist. Note that datafilecopies that belong to any
-- incarnation of the current database are returned.

-------------------------- translateDatafileCopyName --------------------------

PROCEDURE translateDatafileCopyName(
   fname       IN varchar2
  ,available   IN number DEFAULT NULL           -- for compatibility
  ,unavailable IN number DEFAULT NULL           -- for compatibility
  ,duplicates  IN number
  ,statusMask  IN binary_integer DEFAULT NULL   -- for compatibility
  ,onlyone     IN number DEFAULT 1
  ,pluginSCN   IN number DEFAULT 0) IS
BEGIN
   deb(DEB_ENTER, 'translateDatafileCopyName');
   validateState(getDatafileCopyCursor);
   -- Replaces cdf_name
   deb(DEB_OPEN, 'findDatafileBackup_c');
   OPEN findDatafileBackup_c(sourcemask => imageCopy_con_t,
        pattern => fname,
        statusMask => nvl(statusMask,
                          computeAvailableMask(available, unavailable, 0, 0)),
        duplicates => duplicates,
        pluginSCN => pluginSCN);
   getDatafileCopyCursor := 'findDatafileBackup_c';
   getDatafileCopyNoRows.error := -20230;
   getDatafileCopyNoRows.msg := 'Datafile copy does not exist';
   getDatafileCopyDuplicates := duplicates;
   getDatafileCopyLast.dfNumber_obj := NULL;
   getDatafileCopyLatestOnly := FALSE;
   IF (duplicates = FALSE# AND onlyone IS NOT NULL) THEN
      getDatafileCopySingleRow := TRUE;
   ELSE
      getDatafileCopySingleRow := FALSE;
   END IF;
   deb(DEB_EXIT);
END translateDatafileCopyName;

-- Translate a datafilecopy tag. The datafilecopies are ordered by file#
-- in order to make the duplicate elimination work, and by descending
-- checkpoint scn to return the most recent copy first.
-- ### Only copies that belong to the current database incarnation are
-- returned. We should change this in the future to return datafile copies
-- that belong to previous incarnations of the database. The difficulty is
-- that the recovery catalog may also contain datafile copies that belong to
-- an "orphaned" incarnation of the database.

--------------------------- translateDataFileCopyTag --------------------------

PROCEDURE translateDataFileCopyTag(
   tag         IN varchar2
  ,available   IN number DEFAULT NULL           -- for compatibility
  ,unavailable IN number DEFAULT NULL           -- for compatibility
  ,duplicates  IN number
  ,statusMask  IN binary_integer DEFAULT NULL   -- for compatibility
  ,pluginSCN   IN number DEFAULT 0
  ,onlytc      IN binary_integer DEFAULT FALSE#) IS   -- for compatibility
BEGIN
   deb(DEB_ENTER, 'translateDataFileCopyTag');
   validateState(getDatafileCopyCursor);
   -- Replaces cdf_tag. Note that we look only for ones in the current
   -- incarnation because that is what cdf_tag did. If access to
   -- datafilecopies in other incarnations is required, it must be done by
   -- key or filename.
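   --
   -- For example (illustrative): after an OPEN RESETLOGS, a copy made in
   -- the parent incarnation is no longer visible to this tag translation,
   -- because the cursor below is opened with this_reset_scn/this_reset_time;
   -- such a copy has to be addressed by key or filename instead, as noted
   -- above.
   --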
   deb(DEB_OPEN, 'findDataFileBackup_c');
   IF (onlytc != FALSE#) THEN
      deb(DEB_PRINT, 'onlytc is TRUE#');
   END IF;
   OPEN findDatafileBackup_c(sourcemask => imageCopy_con_t,
        tag => tag,
        reset_scn => this_reset_scn,
        reset_time => this_reset_time,
        statusMask => nvl(statusMask,
                          computeAvailableMask(available, unavailable, 0, 0)),
        duplicates => duplicates,
        pluginSCN => pluginSCN,
        onlytc => onlytc);
   getDatafileCopyCursor := 'findDatafileBackup_c';
   getDataFileCopyNoRows.error := -20232;
   getDataFileCopyNoRows.msg := 'Datafile copy tag does not match';
   getDataFileCopyDuplicates := duplicates;
   getDataFileCopyLast.dfNumber_obj := NULL;
   getDataFileCopySingleRow := FALSE;
   getDataFileCopyLatestOnly := FALSE;
   deb(DEB_EXIT);
END translateDataFileCopyTag;

-- Translate a datafilecopy file number. The datafile copies are ordered by
-- stamp in order to return the most recently created copy first, because it
-- is the most likely to exist. Note that datafilecopies that belong to any
-- incarnation of the current database are returned.

-------------------------- translateDatafileCopyFno ---------------------------

PROCEDURE translateDatafileCopyFno(
   fno         IN number
  ,available   IN number DEFAULT NULL
  ,unavailable IN number DEFAULT NULL
  ,duplicates  IN number
  ,statusMask  IN binary_integer DEFAULT NULL
  ,pluginSCN   IN number DEFAULT 0) IS
BEGIN
   deb(DEB_ENTER, 'translateDatafileCopyFno');
   validateState(getDatafileCopyCursor);
   deb(DEB_OPEN, 'findDatafileBackup_c');
   OPEN findDatafileBackup_c(duplicates => duplicates,
        sourcemask => imageCopy_con_t,
        fno => fno,
        statusMask => NVL(statusMask,
                          computeAvailableMask(available, unavailable, 0, 0)),
        pluginSCN => pluginSCN);
   getDatafileCopyCursor := 'findDatafileBackup_c';
   getDatafileCopyNoRows.error := -20230;
   getDatafileCopyNoRows.msg := 'Datafile copy does not exist';
   getDatafileCopyDuplicates := duplicates;
   getDatafileCopyLatestOnly := FALSE;
   getDatafileCopyLast.dfNumber_obj := NULL;
   IF (duplicates = FALSE#) THEN
      getDatafileCopySingleRow := TRUE;
   ELSE
      getDatafileCopySingleRow := FALSE;
   END IF;
   setDfTransClause(fno => fno);
   deb(DEB_EXIT);
END translateDatafileCopyFno;

-- Obsolete as of 8.1.6

------------------------------- getDataFileCopy -------------------------------

PROCEDURE getDataFileCopy(
   recid      OUT number
  ,stamp      OUT number
  ,file#      OUT number
  ,fname      OUT varchar2
  ,reset_scn  OUT number
  ,create_scn OUT number
  ,ckp_scn    OUT number
  ,block_size OUT number
  ,blocks     OUT number) IS
   rcvRec rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'getDataFileCopy');
   getDataFileCopy(rcvRec);
   recid      := rcvRec.recid_con;
   stamp      := rcvRec.stamp_con;
   file#      := rcvRec.dfNumber_obj;
   fname      := rcvRec.fileName_con;
   reset_scn  := rcvRec.rlgSCN_act;
   create_scn := rcvRec.dfCreationSCN_obj;
   ckp_scn    := rcvRec.toSCN_act;
   block_size := rcvRec.blockSize_con;
   blocks     := rcvRec.blocks_con;
   deb(DEB_EXIT);
EXCEPTION
   WHEN no_data_found THEN
      recid := NULL;   -- signal end-of-fetch to client
      deb(DEB_EXIT, 'with no more records');
END getDataFileCopy;

----------------------------
-- Proxy Copy Translation --
----------------------------

--------------------------------- getProxyCopy --------------------------------

PROCEDURE getProxyCopy(
   rcvRec      OUT NOCOPY rcvRec_t
  ,closeCursor IN boolean DEFAULT FALSE) IS
   getProxyCopyRowcount number;
   dummy rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'getProxyCopy');
   IF (getProxyCopyCursor = 'findProxyCopy') THEN
      FETCH findProxyCopy INTO rcvRec;
      IF (findProxyCopy%NOTFOUND) THEN
         getProxyCopyRowcount := findProxyCopy%ROWCOUNT;
         CLOSE findProxyCopy;
      ELSE
         IF (getProxyCopyByHandle) THEN
            -- Make sure there is only 1 row
            FETCH findProxyCopy INTO
dummy; IF (NOT findProxyCopy%NOTFOUND) THEN CLOSE findProxyCopy; deb(DEB_EXIT, 'with error 20311'); raise_application_error(-20311, 'Ambiguous proxy copy handle'); END IF; END IF; END IF; IF (closeCursor AND findProxyCopy%ISOPEN) THEN CLOSE findProxyCopy; END IF; ELSIF (getProxyCopyCursor = 'findProxyCopyKey') THEN FETCH findProxyCopyKey INTO rcvRec; IF (findProxyCopyKey%NOTFOUND) THEN getProxyCopyRowcount := findProxyCopyKey%ROWCOUNT; CLOSE findProxyCopyKey; END IF; IF (closeCursor AND findProxyCopyKey%ISOPEN) THEN CLOSE findProxyCopyKey; END IF; ELSE deb(DEB_EXIT, 'with errors 20204'); raise_application_error(-20204, 'Translation not started'); END IF; IF (closeCursor) THEN getProxyCopyCursor := NULL; END IF; IF (getProxyCopyRowcount IS NOT NULL) THEN getProxyCopyCursor := NULL; IF (getProxyCopyRowcount = 0 AND getProxyCopyNoRows.error IS NOT NULL) THEN deb(DEB_EXIT, 'with norows error'); raise_application_error(getProxyCopyNoRows.error, getProxyCopyNoRows.msg); ELSE deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; -- signal end-of-fetch END IF; END IF; deb(DEB_EXIT); END getProxyCopy; ---------------------------- translateProxyCopyKey ---------------------------- PROCEDURE translateProxyCopyKey( pc_key IN number ,deviceType IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,expired IN number DEFAULT NULL -- for compatability ,statusMask IN binary_integer DEFAULT NULL) -- for compatability IS BEGIN deb(DEB_ENTER, 'translateProxyCopyKey'); validateState(getProxyCopyCursor); deb(DEB_OPEN, 'findProxyCopyKey'); OPEN findProxyCopyKey(key => pc_key, deviceType => deviceType, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, deleted, expired))); getProxyCopyCursor := 'findProxyCopyKey'; getProxyCopyNoRows.error := -20310; getProxyCopyNoRows.msg := 'proxy copy is missing'; getProxyCopyByHandle := FALSE; deb(DEB_EXIT); END translateProxyCopyKey; -- Obsolete as of 8.1.6 ---------------------------- translateProxyCopyKey ---------------------------- PROCEDURE translateProxyCopyKey( pc_key IN number ,device_type IN varchar2 ,available IN number ,unavailable IN number ,deleted IN number ,recid OUT number ,stamp OUT number ,handle OUT varchar2) IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'translateProxyCopyKey815'); translateProxyCopyKey(pc_key => pc_key, deviceType => device_type, available => available, unavailable => unavailable, deleted => deleted, expired => unavailable); getProxyCopy(rcvRec => rcvRec, closeCursor => TRUE); recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; handle := rcvRec.fileName_con; deb(DEB_EXIT); END translateProxyCopyKey; --------------------------- translateProxyCopyHandle -------------------------- PROCEDURE translateProxyCopyHandle( handle IN varchar2 ,deviceType IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,expired IN number DEFAULT NULL -- for compatability ,statusMask IN binary_integer DEFAULT NULL) -- for compatability IS BEGIN deb(DEB_ENTER, 'translateProxyCopyHandle'); validateState(getProxyCopyCursor); deb(DEB_OPEN, 'findProxyCopy'); OPEN findProxyCopy(handle => handle, deviceType => deviceType, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, deleted, expired))); getProxyCopyCursor := 'findProxyCopy'; getProxyCopyNoRows.error := -20310; 
getProxyCopyNoRows.msg := 'proxy copy is missing'; getProxyCopyByHandle := TRUE; deb(DEB_EXIT); END translateProxyCopyHandle; -- Obsolete as of 8.1.6 --------------------------- translateProxyCopyHandle -------------------------- PROCEDURE translateProxyCopyHandle( handle IN varchar2 ,device_type IN varchar2 ,available IN number ,unavailable IN number ,deleted IN number ,recid OUT number ,stamp OUT number) IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'translateProxyCopyHandle815'); translateProxyCopyHandle(handle => handle, deviceType => device_type, available => available, unavailable => unavailable, deleted => deleted, expired => unavailable); getProxyCopy(rcvRec => rcvRec, closeCursor => TRUE); recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; deb(DEB_EXIT); END translateProxyCopyHandle; -- Translate a proxy copy tag into a list of proxy copies ---------------------------- translateProxyCopyTag ---------------------------- PROCEDURE translateProxyCopyTag( tag IN varchar2 ,device_type IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,deleted IN number DEFAULT NULL -- for compatability ,statusMask IN binary_integer DEFAULT NULL) -- for compatability IS BEGIN deb(DEB_ENTER, 'translateProxyCopyTag'); validateState(getProxyCopyCursor); deb(DEB_OPEN, 'findProxyCopy'); OPEN findProxyCopy(tag => tag, deviceType => device_type, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, deleted, unavailable/*expired*/))); getProxyCopyCursor := 'findProxyCopy'; getProxyCopyNoRows.error := -20310; getProxyCopyNoRows.msg := 'no matching proxy copy found'; getProxyCopyByHandle := FALSE; deb(DEB_EXIT); END translateProxyCopyTag; -- Obsolete as of 8.1.6 --------------------------------- getProxyCopy -------------------------------- PROCEDURE getProxyCopy( recid OUT number ,stamp OUT number ,handle OUT varchar2) IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'getProxyCopyHandle'); getProxyCopy(rcvRec); recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; handle := rcvRec.fileName_con; deb(DEB_EXIT); EXCEPTION WHEN no_data_found THEN recid := NULL; -- indicate end-of-fetch deb(DEB_EXIT, 'with no more records'); END getProxyCopy; ------------------------------ -- Backup Piece Translation -- ------------------------------ -------------------------------- getBackupPiece ------------------------------- PROCEDURE getBackupPiece( bpRec OUT NOCOPY bpRec_t ,closeCursor IN boolean DEFAULT FALSE) IS dummy bpRec_t; local bpRec_t; eof boolean := FALSE; eob boolean := FALSE; BEGIN deb(DEB_ENTER, 'getBackupPiece'); IF (getBackupPieceDuplicates = FALSE# AND getBackupPieceDeviceType IS NULL) THEN -- If duplicate filtering is requested, it means only 1 copy of each -- piece is wanted. The deviceType must be specified in this case. -- If not, then raise an exception. 
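   --
   -- For example, a call that sets duplicates => FALSE# while leaving
   -- deviceType NULL fails here: duplicate filtering must know which
   -- device's copies are interchangeable, so the combination is rejected
   -- rather than silently returning an arbitrary subset of pieces.
   --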
      deb(DEB_EXIT, 'with error 20999');
      raise_application_error(-20999, 'deviceType must be specified');
   END IF;

<<nextRow>>
   IF (getBackupPieceCursor = 'findBackupPieceBpKey') THEN
      FETCH findBackupPieceBpKey INTO local;
      IF (findBackupPieceBpKey%NOTFOUND) THEN
         eof := TRUE;
         CLOSE findBackupPieceBpKey;
      END IF;
      IF (closeCursor AND findBackupPieceBpKey%ISOPEN) THEN
         CLOSE findBackupPieceBpKey;
      END IF;
   ELSIF (getBackupPieceCursor = 'findBackupPieceBsKey1') THEN
      FETCH findBackupPieceBsKey1 INTO local;
      IF (findBackupPieceBsKey1%NOTFOUND) THEN
         eof := TRUE;
         CLOSE findBackupPieceBsKey1;
      END IF;
      IF (closeCursor AND findBackupPieceBsKey1%ISOPEN) THEN
         CLOSE findBackupPieceBsKey1;
      END IF;
   ELSIF (getBackupPieceCursor = 'findBackupPieceBsKey2') THEN
      -- look at last backuppiece fetch
      local := getBackupPieceSeekLast;
      deb(DEB_PRINT, 'bskey = ' || local.bskey);
      -- terminate if it doesn't match the request
      IF (local.bskey != getBackupPieceBsKey OR
          local.deviceType != getBackupPieceDeviceType OR
          (getBackupPieceCopyNumber IS NOT NULL AND
           local.copyNumber != getBackupPieceCopyNumber)) THEN
         eob := TRUE;
         deb(DEB_PRINT, 'end of backupset');
      ELSE
<<checkAgain>>
         LOOP
            -- get ready for next piece
            FETCH findBackupPieceBsKey2 INTO dummy;
            IF (findBackupPieceBsKey2%NOTFOUND) THEN
               CLOSE findBackupPieceBsKey2;
               dummy.bskey := 0;
            END IF;
            IF (dummy.bskey != getBackupPieceBsKey OR
                dummy.deviceType != getBackupPieceDeviceType OR
                getBackupPieceCopyNumber IS NULL OR
                dummy.copyNumber = getBackupPieceCopyNumber) THEN
               getBackupPieceSeekLast := dummy;
               EXIT checkAgain;
            END IF;
         END LOOP;
         deb(DEB_PRINT, 'next bskey=' || getBackupPieceSeekLast.bskey);
      END IF;
   ELSIF (getBackupPieceCursor = 'findBackupPiece_c') THEN
      FETCH findBackupPiece_c INTO local;
      IF (findBackupPiece_c%NOTFOUND) THEN
         eof := TRUE;
         CLOSE findBackupPiece_c;
      ELSE
         IF (getBackupPieceByHandle) THEN
            -- Make sure we can only fetch 1 row.
            FETCH findBackupPiece_c INTO dummy;
            IF (NOT findBackupPiece_c%NOTFOUND) THEN
               CLOSE findBackupPiece_c;
               deb(DEB_EXIT, 'with error 20261');
               raise_application_error(-20261,
                                       'Ambiguous backup piece handle');
            END IF;
         END IF;
      END IF;
      IF (closeCursor AND findBackupPiece_c%ISOPEN) THEN
         CLOSE findBackupPiece_c;
      END IF;
   ELSE
      deb(DEB_EXIT, 'with error 20204');
      raise_application_error(-20204, 'Translation not started');
   END IF;

   IF (closeCursor OR eof) THEN
      getBackupPieceCursor := NULL;
   END IF;

   IF (eof OR eob) THEN   -- if end of fetch or end of backupset
      -- If we've been requested to check that all pieces of the set
      -- have been found, then perform the check.
      IF (getBackupPieceDuplicates = FALSE#) THEN
         -- We don't want duplicates, which means duplicate filtering is
         -- in effect. We already asserted above that a deviceType has
         -- been specified, so we know filtering is possible.
         IF (getBackupPieceExpectedPieces IS NOT NULL AND
             (getBackupPieceAvailableMask IS NULL OR
              bitand(getBackupPieceAvailableMask,
                     dbms_rcvman.BSpartial_avail) = 0) AND
             getBackupPieceExpectedPieces <> getBackupPiecePieceCount) THEN
            deb(DEB_EXIT, 'with error 20216');
            raise_application_error(-20216, 'Backup piece is missing');
         END IF;
      END IF;
      IF (getBackupPiecePieceCount = 0 AND
          getBackupPieceNoRows.error IS NOT NULL) THEN
         deb(DEB_EXIT, 'with norows error');
         raise_application_error(getBackupPieceNoRows.error,
                                 getBackupPieceNoRows.msg);
      ELSE
         deb(DEB_EXIT, 'no more records');
         RAISE no_data_found;
      END IF;
   END IF;

   IF (getBackupPieceDuplicates = FALSE#) THEN
      -- The client does not want duplicates, so filter out duplicate
      -- pieces.
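      --
      -- For example, if copy 1 and copy 2 of the same piece both qualify,
      -- only the first row fetched is returned; the pieceNumber comparison
      -- below drops the later ones. This relies on the cursors returning
      -- copies of a given piece adjacently (an assumption documented here,
      -- not enforced).
      --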
IF (local.pieceNumber = getBackupPieceLast.pieceNumber) THEN -- This is a duplicate piece GOTO nextRow; END IF; END IF; getBackupPieceLast := local; bpRec := local; -- set OUT mode arg getBackupPiecePieceCount := getBackupPiecePieceCount + 1; deb(DEB_EXIT); END getBackupPiece; --------------------------- translateBackupPieceKey --------------------------- PROCEDURE translateBackupPieceKey( key IN number ,available IN number DEFAULT TRUE# ,unavailable IN number DEFAULT TRUE# ,expired IN number DEFAULT TRUE# ,statusMask IN binary_integer DEFAULT NULL) -- for compatability IS BEGIN -- Open cursor to lookup bp by primary key. Treat expired backup pieces -- as if they were unavailable. We do this only because the interface -- to translateBackupPieceKey was not enhanced to have an "expired" -- argument when status 'X' was introduced. -- Replaces the cursor that was in the 8.1.x translateBackupPieceKey. deb(DEB_ENTER, 'translateBackupPieceKey'); findBackupPiece(bpKey => key, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, 0, unavailable))); getBackupPieceNoRows.error := -20260; getBackupPieceNoRows.msg := 'Backup piece is missing'; getBackupPieceAvailableMask := statusMask; deb(DEB_EXIT); END translateBackupPieceKey; --------------------------- translateBackupPieceKey --------------------------- PROCEDURE translateBackupPieceKey( bp_key IN number ,available IN number ,unavailable IN number ,recid OUT number ,stamp OUT number ,handle OUT varchar2 ,set_stamp OUT number ,set_count OUT number ,piece# OUT number) IS bpRec bpRec_t; BEGIN deb(DEB_ENTER, 'translateBackupPieceKey'); -- Open cursor to lookup bp by primary key. Treat expired backup pieces -- as if they were unavailable. We do this only because the interface -- to translateBackupPieceKey was not enhanced to have an "expired" -- argument when status 'X' was introduced. It should have been enhanced -- to have one, but since it was not, we have to assume the most -- reasonable default value. -- Replaces the cursor that was in the 8.1.x translateBackupPieceKey. 
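-- For example (values illustrative): available => TRUE#, unavailable =>
-- FALSE# produces computeAvailableMask(TRUE#, FALSE#, 0, FALSE#), so
-- expired ('X') pieces are excluded exactly when unavailable ones are; the
-- expired flag deliberately tracks the unavailable argument here.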
translateBackupPieceKey(key => bp_key, statusMask => computeAvailableMask(available, unavailable, 0, unavailable/*expired*/)); getBackupPiece(bpRec => bpRec, closeCursor => TRUE); recid := bpRec.recid; stamp := bpRec.stamp; handle := bpRec.handle; set_stamp := bpRec.setStamp; set_count := bpRec.setCount; piece# := bpRec.pieceNumber; deb(DEB_EXIT); END translateBackupPieceKey; -------------------------- translateBackupPieceHandle ------------------------- PROCEDURE translateBackupPieceHandle( handle IN varchar2 ,deviceType IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,expired IN number DEFAULT NULL -- for compatability ,statusMask IN binary_integer DEFAULT NULL) -- for compatability IS BEGIN deb(DEB_ENTER, 'translateBackupPieceHandle'); findBackupPiece(handle => handle, deviceType => deviceType, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, 0, expired))); getBackupPieceNoRows.error := -20260; getBackupPieceNoRows.msg := 'Backup piece is missing'; getBackupPieceByHandle := TRUE; getBackupPieceAvailableMask := statusMask; deb(DEB_EXIT); END translateBackupPieceHandle; -------------------------- translateBackupPieceHandle ------------------------- PROCEDURE translateBackupPieceHandle( -- only used in 8.1.6 handle IN varchar2 ,device_type IN varchar2 ,available IN number ,unavailable IN number ,recid OUT number ,stamp OUT number ,set_stamp OUT number ,set_count OUT number ,piece# OUT number) IS bpRec bpRec_t; BEGIN deb(DEB_ENTER, 'translateBackupPieceHandle816'); translateBackupPieceHandle(handle => handle, deviceType => device_type, statusMask => computeAvailableMask(available, unavailable, 0, unavailable/*expired*/)); getBackupPiece(bpRec => bpRec, closeCursor => TRUE); recid := bpRec.recid; stamp := bpRec.stamp; set_stamp := bpRec.setStamp; set_count := bpRec.setCount; piece# := bpRec.pieceNumber; deb(DEB_EXIT); END translateBackupPieceHandle; -- Translate a backup piece tag into a list of backup pieces --------------------------- translateBackupPieceTag --------------------------- PROCEDURE translateBackupPieceTag( tag IN varchar2 ,available IN number DEFAULT NULL -- for compatability ,unavailable IN number DEFAULT NULL -- for compatability ,statusMask IN binary_integer DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'translateBackupPieceTag'); findBackupPiece(tag => tag, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, 0, /* expired = */unavailable))); deb(DEB_EXIT); END translateBackupPieceTag; -------------------------- translateBackupPieceBSKey -------------------------- PROCEDURE translateBackupPieceBSKey( key IN number ,tag IN varchar2 DEFAULT NULL ,deviceType IN varchar2 DEFAULT NULL ,pieceCount IN number ,duplicates IN number DEFAULT TRUE# ,copyNumber IN number DEFAULT NULL ,available IN number DEFAULT TRUE# ,unavailable IN number DEFAULT FALSE# ,deleted IN number DEFAULT FALSE# ,expired IN number DEFAULT FALSE# ,statusMask IN binary_integer DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'translateBackupPieceBSKey'); findBackupPiece(bsKey => key, tag => tag, deviceType => deviceType, copyNumber => copyNumber, statusMask => NVL(statusMask, computeAvailableMask(available, unavailable, deleted, expired))); getBackupPieceDuplicates := duplicates; getBackupPieceExpectedPieces := pieceCount; getBackupPieceAvailableMask := statusMask; deb(DEB_EXIT); END translateBackupPieceBSKey; --------------------------- translateBackupPieceBsKey ------------------------- PROCEDURE 
translateBackupPieceBsKey(
   startBsKey IN number
  ,tag        IN varchar2 DEFAULT NULL
  ,statusMask IN binary_integer DEFAULT NULL) IS
BEGIN
   findBackupPiece(startBsKey => startBsKey,
                   tag => tag,
                   statusMask => NVL(statusMask,
                        computeAvailableMask(TRUE#    /* available   */,
                                             FALSE#   /* unavailable */,
                                             FALSE#   /* deleted     */,
                                             FALSE#   /* expired     */)));
   getBackupPieceAvailableMask := statusMask;
END translateBackupPieceBsKey;

---------------------------- translateSeekBpBsKey -----------------------------

PROCEDURE translateSeekBpBsKey(
   bsKey      IN number
  ,deviceType IN varchar2
  ,pieceCount IN number
  ,duplicates IN number DEFAULT TRUE#
  ,copyNumber IN number DEFAULT NULL) IS
BEGIN
   deb(DEB_ENTER, 'translateSeekBpBsKey');
   deb(DEB_IN, 'bskey=' || bsKey);
   IF (getBackupPieceCursor IS NULL OR
       getBackupPieceCursor != 'findBackupPieceBsKey2') THEN
      raise_application_error(-20204, 'Translation not started');
   ELSIF (deviceType IS NULL) THEN
      raise_application_error(-20999, 'deviceType must be specified');
   END IF;

   -- Initialize all of the getBackupPiece variables to their default
   -- state.
   getBackupPieceNoRows.error := NULL;
   getBackupPieceDuplicates := duplicates;
   getBackupPieceLast.pieceNumber := NULL;
   getBackupPieceDeviceType := deviceType;
   getBackupPieceExpectedPieces := pieceCount;
   getBackupPiecePieceCount := 0;
   getBackupPieceByHandle := FALSE;
   getBackupPieceCopyNumber := copyNumber;
   getBackupPieceBskey := bsKey;

<<checkAgain>>
   LOOP
      IF (NOT findBackupPieceBsKey2%ISOPEN) THEN
         -- all done
         deb(DEB_EXIT, 'cursor not open');
         RAISE no_data_found;
      END IF;
      IF (getBackupPieceSeekLast.bskey > bsKey) THEN
         -- gone over the key
         deb(DEB_EXIT, 'gone over key seek=' ||
             getBackupPieceSeekLast.bskey || ' key=' || bsKey);
         RAISE no_data_found;
      END IF;
      -- exit the loop if bskey, deviceType and copyNumber all match
      IF (getBackupPieceSeekLast.bskey = bsKey AND
          getBackupPieceSeekLast.deviceType = deviceType AND
          (copyNumber IS NULL OR
           getBackupPieceSeekLast.copyNumber = copyNumber)) THEN
         EXIT checkAgain;
      END IF;
      FETCH findBackupPieceBsKey2 INTO getBackupPieceSeekLast;
      IF (findBackupPieceBsKey2%NOTFOUND) THEN
         -- all done
         CLOSE findBackupPieceBsKey2;
         deb(DEB_EXIT, 'no more data');
         RAISE no_data_found;
      END IF;
   END LOOP;
   deb(DEB_EXIT, 'got key=' || bsKey);
END translateSeekBpBsKey;

--------------------------- translateBpBsKeyCancel ----------------------------

PROCEDURE translateBpBsKeyCancel IS
BEGIN
   IF (findBackupPieceBsKey1%ISOPEN) THEN
      CLOSE findBackupPieceBsKey1;
   END IF;
   IF (findBackupPieceBsKey2%ISOPEN) THEN
      CLOSE findBackupPieceBsKey2;
   END IF;
   getBackupPieceCursor := NULL;
END translateBpBsKeyCancel;

-- Obsolete as of 8.1.6

---------------------------- translateBackupSetKey ----------------------------

PROCEDURE translateBackupSetKey(
   bs_key          IN number
  ,device_type     IN varchar2
  ,available       IN number
  ,unavailable     IN number
  ,deleted         IN number
  ,duplicates      IN number
  ,backup_type     OUT varchar2
  ,recid           OUT number
  ,stamp           OUT number
  ,set_stamp       OUT number
  ,set_count       OUT number
  ,bslevel         OUT number
  ,completion_time OUT date) IS
   bsRec bsRec_t;
BEGIN
   deb(DEB_ENTER, 'translateBackupSetKey815');
   findBackupSet(bsKey => bs_key,   -- lookup backup set by key
                 bsRec => bsRec);
   backup_type     := bsRec.bsType;
   recid           := bsRec.recid;
   stamp           := bsRec.stamp;
   set_stamp       := bsRec.setStamp;
   set_count       := bsRec.setCount;
   bslevel         := bsRec.level;
   completion_time := bsRec.compTime;
   -- Open cursor to lookup bp by backup set key. Treat expired pieces
   -- as if they were unavailable.
We do this only because the interface -- to translateBackupSetKey was not enhanced to have an "expired" -- argument when status 'X' was introduced. It should have been enhanced -- to have one, but since it was not, we have to assume the most -- reasonable default value. -- Replaces bsq1 translateBackupPieceBSKey(key => bs_key, deviceType => device_type, pieceCount => bsRec.pieceCount, available => available, unavailable => unavailable, deleted => deleted, expired => unavailable); getBackupPieceDuplicates := duplicates; IF (device_type IS NULL) THEN -- The pre-8.1.6 RMAN is broken. When doing a LIST (surprise surprise) -- it calls this routine with device_type NULL, but the duplicates -- flag is FALSE# (don't want duplicates). This makes no sense because -- we can do duplicate filtering only when a device type is specified. -- The old version of this package tolerated this client behaviour, but -- this version is more strict and will raise an exception. So to avoid -- getting an exception when the old RMAN is calling us, set the -- duplicates flag to TRUE#. This package will then behave the same -- as the old one. getBackupPieceDuplicates := TRUE#; -- yes, we want duplicates END IF; IF (getBackupPieceDuplicates = FALSE#) THEN -- If we don't want duplicate pieces, then we are probably planning -- to access the backup set, so tell getBackupPiece to perform the -- missing piece check. getBackupPieceExpectedPieces := bsRec.pieceCount; END IF; deb(DEB_EXIT); END translateBackupSetKey; -- Obsolete as of 8.1 ---------------------------- translateBackupSetKey ---------------------------- PROCEDURE translateBackupSetKey( bs_key IN number ,device_type IN varchar2 ,available IN number ,unavailable IN number ,deleted IN number ,duplicates IN number ,backup_type OUT varchar2 ,recid OUT number ,stamp OUT number) IS set_stamp number; set_count number; bslevel number; completion_time date; BEGIN deb(DEB_ENTER, 'translateBackupSetKey80'); translateBackupSetKey(bs_key, device_type, available, unavailable, deleted, duplicates, backup_type, recid, stamp, set_stamp, set_count, bslevel, completion_time); deb(DEB_EXIT); END translateBackupSetKey; -- Obsolete as of 8.1.6 --------------------------- translateBackupSetRecid --------------------------- PROCEDURE translateBackupSetRecid( recid IN number ,stamp IN number ,device_type IN varchar2 ,bs_key OUT number ,bslevel OUT number ,completed OUT date) IS bsRec bsRec_t; pieceCount number; validationRec validBackupSetRec_t; gotRecord number; duplicates_flag number; BEGIN deb(DEB_ENTER, 'translateBackupSetRecid815'); findBackupSet(recid => recid, stamp => stamp, bsRec => bsRec); bs_key := bsRec.key; bslevel := bsRec.level; completed := bsRec.compTime; -- See if all pieces are available from the same copy#. We already -- know that the backupset is valid because we wouldn't be here -- otherwise. -- NOTE: device_type is null if doing REPORT OBSOLETE findValidBackupSet(backupSetRec => bsRec, tag => restoreTag, deviceType => device_type, available => TRUE#); gotRecord := getValidBackupSet(validBackupSetRec => validationRec); IF (getValidBackupSetCursor = 'findValidBackupSet_c') THEN CLOSE findValidBackupSet_c; ELSIF (getValidBackupSetCursor = 'findValidBackupSet1P_c') THEN CLOSE findValidBackupSet1P_c; END IF; IF (gotRecord = FALSE#) THEN -- Allow a mix of copy#s. It is probably the case that a backup piece -- is missing and getBackupPiece will raise that error. I don't -- know how we could have gotten here in that case, but getBackupPiece -- will do the right thing. 
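   --
   -- For example, if copy 1 lacks piece 2 but copy 2 has it, no single
   -- copy# validates the whole set; with copyNumber left NULL the piece
   -- translation below may return a mix of pieces from copy 1 and copy 2,
   -- and any piece that is still missing will be reported by
   -- getBackupPiece.
   --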
      validationRec.copyNumber := NULL;
   END IF;

   IF (device_type IS NULL) THEN
      -- This is the REPORT OBSOLETE case
      duplicates_flag := TRUE#;
   ELSE
      -- This is the normal case (restore/recover)
      duplicates_flag := FALSE#;
   END IF;

   translateBackupPieceBSKey(key => bsRec.key,
                             tag => validationRec.tag,
                             deviceType => device_type,
                             pieceCount => bsRec.pieceCount,
                             duplicates => duplicates_flag,
                             copyNumber => validationRec.copyNumber,
                             available => TRUE#);
   deb(DEB_EXIT);
END translateBackupSetRecid;

-- Obsolete as of 8.1

--------------------------- translateBackupSetRecid ---------------------------

PROCEDURE translateBackupSetRecid(
   recid       IN number
  ,stamp       IN number
  ,device_type IN varchar2) IS
   bs_key    number;
   bslevel   number;
   completed date;
BEGIN
   deb(DEB_ENTER, 'translateBackupSetRecid80');
   translateBackupSetRecid(recid, stamp, device_type, bs_key, bslevel,
                           completed);
   deb(DEB_EXIT);
END translateBackupSetRecid;

-- Obsolete as of 8.1.6

-------------------------------- getBackupPiece -------------------------------

PROCEDURE getBackupPiece(
   recid      OUT number
  ,stamp      OUT number
  ,bpkey      OUT number
  ,set_stamp  OUT number
  ,set_count  OUT number
  ,piece#     OUT number
  ,copy#      OUT number
  ,status     OUT varchar2
  ,completion OUT date
  ,handle     OUT varchar2) IS
   bpRec bpRec_t;
BEGIN
   deb(DEB_ENTER, 'getBackupPiece815');
   getBackupPiece(bpRec);
   recid      := bpRec.recid;
   stamp      := bpRec.stamp;
   bpkey      := bpRec.key;
   set_stamp  := bpRec.setStamp;
   set_count  := bpRec.setCount;
   piece#     := bpRec.pieceNumber;
   copy#      := bpRec.copyNumber;
   status     := bpRec.status;
   completion := bpRec.compTime;
   handle     := bpRec.handle;
   deb(DEB_EXIT);
EXCEPTION
   WHEN no_data_found THEN
      deb(DEB_EXIT, 'with no more records');
      recid := NULL;   -- indicate end-of-fetch to the client (null the OUT
                       -- parameter, not the local record, so the caller
                       -- actually sees it)
END getBackupPiece;

-- Obsolete as of 8.1

-------------------------------- getBackupPiece -------------------------------

PROCEDURE getBackupPiece(
   recid     OUT number
  ,stamp     OUT number
  ,set_stamp OUT number
  ,set_count OUT number
  ,piece#    OUT number
  ,handle    OUT varchar2) IS
   bpRec bpRec_t;
BEGIN
   deb(DEB_ENTER, 'getBackupPiece80');
   getBackupPiece(bpRec);
   recid     := bpRec.recid;
   stamp     := bpRec.stamp;
   set_stamp := bpRec.setStamp;
   set_count := bpRec.setCount;
   piece#    := bpRec.pieceNumber;
   handle    := bpRec.handle;
   deb(DEB_EXIT);
EXCEPTION
   WHEN no_data_found THEN
      recid := NULL;   -- indicate end-of-fetch to the client
      deb(DEB_EXIT, 'with no more records');
END getBackupPiece;

----------------------------
-- Backup Set Translation --
----------------------------

---------------------------- translateBackupSetKey ----------------------------

PROCEDURE translateBackupSetKey(
   key   IN number
  ,bsRec OUT NOCOPY bsRec_t) IS
BEGIN
   deb(DEB_ENTER, 'translateBackupSetKey');
   findBackupSet(bsKey => key, bsRec => bsRec);
   deb(DEB_EXIT);
END translateBackupSetKey;

------------------------
-- Controlfile Backup --
------------------------

-- This is for 8.0.4 compatibility

---------------------------- findControlFileBackup ----------------------------

FUNCTION findControlFileBackup(
   type        OUT number
  ,recid       OUT number
  ,stamp       OUT number
  ,fname       OUT varchar2
  ,device_type OUT varchar2
  ,ckp_scn     OUT number) RETURN number IS
   rcvRec rcvRec_t;
   rc     number;
BEGIN
   deb(DEB_ENTER, 'findControlFileBackup804');
   rc := getControlfileBackup(rcvRec);
   IF (rc = SUCCESS) THEN
      IF (rcvRec.type_con = imageCopy_con_t) THEN
         type := COPY;
      ELSIF (rcvRec.type_con = backupSet_con_t) THEN
         type := BACKUP;
      ELSIF (rcvRec.type_con = proxyCopy_con_t) THEN
         type := PROXY;
      ELSE
         -- This is an unknown container type.
deb(DEB_EXIT, 'with: UNAVAILABLE'); RETURN dbms_rcvman.UNAVAILABLE; END IF; IF (type = BACKUP) THEN recid := rcvRec.bsRecid_con; stamp := rcvRec.bsStamp_con; ELSE recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; END IF; fname := rcvRec.fileName_con; device_type := rcvRec.deviceType_con; ckp_scn := rcvRec.toSCN_act; deb(DEB_EXIT, 'with: SUCCESS'); RETURN SUCCESS; ELSE deb(DEB_EXIT, 'with: '||to_char(rc)); RETURN rc; END IF; deb(DEB_EXIT); END findControlFileBackup; -- Obsolete as of 8.1.6 ---------------------------- findControlFileBackup ---------------------------- FUNCTION findControlFileBackup( type OUT number ,recid OUT number ,stamp OUT number ,fname OUT varchar2 ,device_type OUT varchar2 ,ckp_scn OUT number ,rlg_scn OUT number ,blksize OUT number) RETURN number IS rcvRec rcvRec_t; rc number; BEGIN deb(DEB_ENTER, 'findControlFileBackup815'); rc := getControlfileBackup(rcvRec); IF (rc = SUCCESS) THEN IF (rcvRec.type_con = imageCopy_con_t) THEN type := COPY; ELSIF (rcvRec.type_con = backupSet_con_t) THEN type := BACKUP; ELSIF (rcvRec.type_con = proxyCopy_con_t) THEN type := PROXY; rcvRec_last := rcvRec; -- save for translateProxyDFRecid ELSE -- This is an unknown container type. deb(DEB_EXIT, 'with: UNAVAILABLE'); RETURN dbms_rcvman.UNAVAILABLE; END IF; IF (type = BACKUP) THEN recid := rcvRec.bsRecid_con; stamp := rcvRec.bsStamp_con; ELSE recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; END IF; fname := rcvRec.fileName_con; device_type := rcvRec.deviceType_con; ckp_scn := rcvRec.toSCN_act; rlg_scn := rcvRec.rlgSCN_act; blksize := rcvRec.blockSize_con; deb(DEB_EXIT, 'with: SUCCESS'); RETURN SUCCESS; ELSE deb(DEB_EXIT, 'with: '||to_char(rc)); RETURN rc; END IF; END findControlFileBackup; ------------------------- -- Archived Log Backup -- ------------------------- -- Obsolete as of 8.1.6 ---------------------------- findArchivedLogBackup ---------------------------- FUNCTION findArchivedLogBackup( thread# IN number ,sequence# IN number ,low_scn IN number ,type OUT number ,recid OUT number ,stamp OUT number ,device_type OUT varchar2) RETURN number IS rcvRec rcvRec_t; RC binary_integer; BEGIN deb(DEB_ENTER, 'findArchivedLogBackup'); -- NOTE: The previous implementation of this checked the restore_from -- variable. This is really not necessary as all RMAN versions always -- called setFrom(backup) before calling this procedure. 
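-- [Illustrative sketch, not part of the original source.] This 8.1.5-era
-- entry point is equivalent to the two-step flow used by newer clients
-- (variables hypothetical; both routines are defined in this package):
--
--   findArchivedLogBackup(thread#, sequence#, low_scn);  -- translate step
--   IF (getArchivedLogBackup(rcvRec) = SUCCESS) THEN     -- fetch step
--     NULL;  -- use rcvRec.bsRecid_con, bsStamp_con, deviceType_con
--   END IF;
--
-- which is exactly what the body below does.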
findArchivedLogBackup(thread#, sequence#, low_scn); RC := getArchivedLogbackup(rcvRec); IF (RC = SUCCESS) THEN type := BACKUP; recid := rcvRec.bsRecid_con; stamp := rcvRec.bsStamp_con; device_type := rcvRec.deviceType_con; END IF; deb(DEB_EXIT, 'with: '||to_char(RC)); RETURN RC; END findArchivedLogBackup; --------------- -- List Copy -- --------------- ------------------------- listTranslateControlfileCopy ------------------------ PROCEDURE listTranslateControlfileCopy( tag IN varchar2 ,completedAfter IN date ,completedBefore IN date ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired ,liststby IN binary_integer DEFAULT NULL -- default for 8.1 ,file_pattern IN varchar2 DEFAULT NULL) IS currentIncarnation number; BEGIN deb(DEB_ENTER, 'listTranslateControlfileCopy'); IF (findControlfileBackup_c%ISOPEN) THEN -- should not be open CLOSE findControlfileBackup_c; END IF; IF (allIncarnations = TRUE#) THEN currentIncarnation := FALSE#; -- don't care about dbinc_key ELSE currentIncarnation := TRUE#; END IF; -- Replaces the lccf cursor deb(DEB_OPEN, 'findControlfileBackup_c'); OPEN findControlfileBackup_c( sourcemask => imageCopy_con_t, currentIncarnation => currentIncarnation, tag => tag, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask, pattern => file_pattern, needstby => liststby); deb(DEB_EXIT); END listTranslateControlfileCopy; ---------------------------- listGetControlfileCopy --------------------------- PROCEDURE listGetControlfileCopy( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetControlfileCopy'); FETCH findControlfileBackup_c INTO rcvRec; IF (findControlfileBackup_c%NOTFOUND) THEN CLOSE findControlfileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; -- Do not need to check the allocated device types. Copies are available -- only on type DISK, so we can assume we wouldn't even be here if -- a DISK wasn't allocated. deb(DEB_EXIT); END listGetControlfileCopy; -- Obsolete as of 8.1.6 ---------------------------- listGetControlfileCopy --------------------------- FUNCTION listGetControlfileCopy( bcfkey OUT number, ckpscn OUT number, ckptime OUT date, status OUT varchar2, completion OUT date, fname OUT varchar2) RETURN number IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetControlfileCopy'); listGetControlfileCopy(rcvRec); bcfkey := rcvRec.key_con; ckpscn := rcvRec.toSCN_act; ckptime := rcvRec.toTime_act; status := rcvRec.status_con; completion := rcvRec.compTime_con; fname := rcvRec.fileName_con; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetControlfileCopy; -------------------------- listTranslateDataFileCopy -------------------------- PROCEDURE listTranslateDataFileCopy( file# IN number ,creation_change# IN number ,tag IN varchar2 DEFAULT NULL ,file_name_pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable -- default for 8.1 ,pluginSCN IN number DEFAULT 0) IS creationSCN number; reset_scn number := NULL; reset_time date := NULL; BEGIN deb(DEB_ENTER, 'listTranslateDataFileCopy'); IF (allIncarnations = TRUE#) THEN reset_scn := NULL; reset_time := NULL; IF (ignoreCreationSCN = TRUE#) THEN -- Since the flag is true, we want to list copies of all -- incarnations of each datafile number. Set crescn to NULL so that -- the query returns all datafilecopies of a particular file#. 
-- This is used only by RMAN 8.1.5 and prior 8.1 releases. creationSCN := NULL; ELSE creationSCN := creation_change#; END IF; ELSE reset_scn := this_reset_scn; reset_time := this_reset_time; creationSCN := creation_change#; END IF; -- Replaces lcdf cursor deb(DEB_OPEN, 'findDatafileBackup_c'); OPEN findDatafileBackup_c(sourcemask => imageCopy_con_t, fno => file#, crescn => creationSCN, reset_scn => reset_scn, reset_time => reset_time, tag => tag, pattern => file_name_pattern, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask, pluginSCN => pluginSCN); deb(DEB_EXIT); END listTranslateDataFileCopy; ----------------------------- listGetDataFileCopy ----------------------------- PROCEDURE listGetDataFileCopy( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetDataFileCopy'); FETCH findDatafileBackup_c INTO rcvRec; IF (findDatafileBackup_c%NOTFOUND) THEN CLOSE findDatafileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; -- Do not need to check the allocated device types. Copies are available -- only on type DISK, so we can assume we wouldn't even be here if -- a DISK wasn't allocated. deb(DEB_EXIT); END listGetDatafileCopy; -- Obsolete as of 8.1.6 ----------------------------- listGetDataFileCopy ----------------------------- FUNCTION listGetDataFileCopy( cdf_key OUT number ,status OUT varchar2 ,fname OUT varchar2 ,completion_time OUT date ,checkpoint_change# OUT number ,checkpoint_time OUT date) RETURN number IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetDataFileCopy815'); listGetDatafileCopy(rcvRec); cdf_key := rcvRec.key_con; status := rcvRec.status_con; fname := rcvRec.fileName_con; completion_time := rcvRec.compTime_con; checkpoint_change# := rcvRec.toSCN_act; checkpoint_time := rcvRec.toTime_act; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetDataFileCopy; ------------------------- listTranslateArchivedLogCopy ------------------------ PROCEDURE listTranslateArchivedLogCopy( thread# IN number ,sequence# IN number ,first_change# IN number ,file_name_pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired -- 8.0/8.1 defaults ,needstby IN number DEFAULT NULL) IS currentIncarnation number; BEGIN deb(DEB_ENTER, 'listTranslateArchivedLogCopy'); IF (allIncarnations = TRUE#) THEN currentIncarnation := FALSE#; -- don't care about dbinc_key ELSE currentIncarnation := TRUE#; END IF; deb(DEB_OPEN, 'findArchivedLogCopy'); OPEN findArchivedLogCopy(currentIncarnation => currentIncarnation, thread => thread#, sequence => sequence#, lowSCN => first_change#, pattern => file_name_pattern, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask, -- bug-2675757: As of 10i beta2, needstby is always NULL needstby => NULL); getrcvRecLast := NULL; deb(DEB_EXIT); END listTranslateArchivedLogCopy; ---------------------------- listGetArchivedLogCopy --------------------------- PROCEDURE listGetArchivedLogCopy( rcvRec OUT NOCOPY rcvRec_t) IS duplicate number; -- used for filtering duplicate names BEGIN deb(DEB_ENTER, 'listGetArchivedLogCopy'); -- Check if disk device is allocated. Copies are available -- only on type DISK and no tag is associated with archivelog copies. 
-- If any of these conditions are not satisfied - then no archivelogs are returned IF (restoreTag is not NULL OR not diskDevice) THEN CLOSE findArchivedLogCopy; deb(DEB_EXIT, 'tag specified or no diskDevice allocated'); RAISE no_data_found; END IF; <<nextRow>> FETCH findArchivedLogCopy INTO rcvRec; IF (findArchivedLogCopy%NOTFOUND) THEN CLOSE findArchivedLogCopy; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; -- For catalog case... -- BEGIN_CAT_RCVMAN_ONLY -- Check if this log has same redo as earlier one IF (rcvRec.logThread_obj = getrcvRecLast.logThread_obj AND rcvRec.logSequence_obj = getrcvRecLast.logSequence_obj AND rcvRec.loglowSCN_obj = getrcvRecLast.loglowSCN_obj AND rcvRec.logrlgSCN_obj = getrcvRecLast.logrlgSCN_obj AND rcvRec.logrlgTime_obj = getrcvRecLast.logrlgTime_obj ) THEN duplicate := TRUE#; ELSE duplicate := FALSE#; END IF; IF IsDuplicateAlName(duplicate, rcvRec.filename_con) THEN GOTO nextRow; END IF; getrcvRecLast := rcvRec; -- END_CAT_RCVMAN_ONLY deb(DEB_EXIT); END listGetArchivedLogCopy; -- Obsolete as of 8.1.6 ---------------------------- listGetArchivedLogCopy --------------------------- FUNCTION listGetArchivedLogCopy( al_key OUT number ,status OUT varchar2 ,fname OUT varchar2 ,completion_time OUT date) RETURN number IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetArchivedLogCopy'); listGetArchivedLogCopy(rcvRec); al_key := rcvRec.key_con; status := rcvRec.status_con; fname := rcvRec.fileName_con; completion_time := rcvRec.compTime_con; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetArchivedLogCopy; ----------------- -- List Backup -- ----------------- ------------------------ listTranslateControlfileBackup ----------------------- PROCEDURE listTranslateControlfileBackup( tag IN varchar2 ,completedAfter IN date ,completedBefore IN date ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired -- 8.0/8.1 defaults ,autobackup IN binary_integer DEFAULT BScfile_all ,liststby IN binary_integer DEFAULT NULL) IS currentIncarnation number; BEGIN deb(DEB_ENTER, 'listTranslateControlfileBackup'); IF (findControlfileBackup_c%ISOPEN) THEN -- should not be open CLOSE findControlfileBackup_c; END IF; IF (allIncarnations = TRUE#) THEN currentIncarnation := FALSE#; -- don't care about dbinc_key ELSE currentIncarnation := TRUE#; END IF; -- Replaces the lbcf cursor. Note that the tag is handled by -- validateBackupSet in the get() procedure. deb(DEB_OPEN, 'findControlfileBackup_c'); OPEN findControlfileBackup_c(sourcemask => backupSet_con_t, currentIncarnation => currentIncarnation, completedAfter => completedAfter, completedBefore => completedBefore, typemask => autobackup, needstby => liststby); -- The following parameters are saved in global variables for use by -- the pre-8.1.6 listGetControlfileBackup procedure. Note that the 8.1.6 -- procedure does NOT use them. Instead, it is expected that the 8.1.6 -- RMAN will use the findValidBackupSet procedures to get a list of the -- valid copies of a backup set. listGetBackupTag := tag; listGetBackupAvailableMask := statusMask; deb(DEB_EXIT); END listTranslateControlfileBackup; --------------------------- listGetControlfileBackup -------------------------- PROCEDURE listGetControlfileBackup( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetControlfileBackup'); FETCH findControlfileBackup_c INTO rcvRec; -- Note: no backupset validation done here. RMAN should use the -- findValidBackupSet procedures to do that.
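-- [Illustrative sketch, not part of the original source.] The expected
-- caller-side pairing of this fetch with validation, mirroring the
-- obsolete 8.1.5 wrapper that follows (variables hypothetical):
--
--   LOOP
--     listGetControlfileBackup(rcvRec);   -- raises no_data_found at end
--     EXIT WHEN validateBackupSet(backupSetRec => rcvRec,
--                                 tag => NULL,
--                                 tagMatchRequired => TRUE,
--                                 checkDeviceIsAllocated => TRUE,
--                                 availableMask => BSavailable,
--                                 validRec => validRec) = SUCCESS;
--   END LOOP;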
IF (findControlfileBackup_c%NOTFOUND) THEN CLOSE findControlfileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; deb(DEB_EXIT); END listGetControlfileBackup; -- Obsolete as of 8.1.6 --------------------------- listGetControlfileBackup -------------------------- FUNCTION listGetControlfileBackup( bskey OUT number, ckpscn OUT number, ckptime OUT date) RETURN number IS rcvRec rcvRec_t; validationRec validBackupSetRec_t; validationRC binary_integer; BEGIN deb(DEB_ENTER, 'listGetControlfileBackup815'); <<nextRow>> BEGIN listGetControlfileBackup(rcvRec); EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END; validationRC := validateBackupSet(backupSetRec => rcvRec, tag => listGetBackupTag, tagMatchRequired => TRUE, checkDeviceIsAllocated => TRUE, availableMask => listGetBackupAvailableMask, validRec => validationRec); IF (validationRC <> SUCCESS) THEN GOTO nextRow; END IF; bskey := rcvRec.bsKey_con; ckpscn := rcvRec.toSCN_act; ckptime := rcvRec.toTime_act; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetControlfileBackup; ------------------------ listTranslateSpfileBackup --------------------------- PROCEDURE listTranslateSpfileBackup( completedAfter IN date ,completedBefore IN date) IS BEGIN deb(DEB_ENTER, 'listTranslateSpfileBackup'); IF (findSpfileBackup_c%ISOPEN) THEN -- should not be open CLOSE findSpfileBackup_c; END IF; deb(DEB_OPEN, 'findSpfileBackup_c'); OPEN findSpfileBackup_c(completedAfter => completedAfter, completedBefore => completedBefore); deb(DEB_EXIT); END listTranslateSpfileBackup; --------------------------- listGetSpfileBackup ------------------------------ PROCEDURE listGetSpfileBackup( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetSpfileBackup'); FETCH findSpfileBackup_c INTO rcvRec; -- Note: no backupset validation done here. RMAN should use the -- findValidBackupSet procedures to do that. IF (findSpfileBackup_c%NOTFOUND) THEN CLOSE findSpfileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; deb(DEB_EXIT); END listGetSpfileBackup; ------------------------- listTranslateDataFileBackup ------------------------- PROCEDURE listTranslateDataFileBackup( file# IN number ,creation_change# IN number ,tag IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired -- 8.0/8.1 defaults ,pluginSCN IN number DEFAULT 0) IS rlgSCN number; rlgTime date; crescn number; BEGIN deb(DEB_ENTER, 'listTranslateDataFileBackup'); IF (findDatafileBackup_c%ISOPEN) THEN CLOSE findDatafileBackup_c; END IF; IF (allIncarnations = TRUE#) THEN IF (ignoreCreationSCN = TRUE#) THEN -- Since the flag is true, we want to list copies of all -- incarnations of each datafile number. Set crescn to NULL so that -- the query returns all datafilecopies of a particular file#. -- This is used only by RMAN 8.1.5 and prior 8.1 releases. -- Leave rlgSCN and rlgTime null. crescn := NULL; ELSE crescn := creation_change#; END IF; ELSE -- The 8.0 RMAN did not list backups that belonged to incarnations -- other than the current incarnation.
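-- [Illustrative note, not part of the original source.] The statusMask
-- defaults used above combine by addition because each BS* constant is
-- assumed to occupy a distinct bit, which is what isStatusMatch's bitand
-- tests rely on, e.g.:
--
--   statusMask := BSavailable + BSunavailable + BSexpired;
--   IF (bitand(statusMask, BSexpired) != 0) THEN
--     NULL;   -- 'X' pieces pass this filter
--   END IF;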
rlgSCN := this_reset_scn; rlgTime := this_reset_time; crescn := creation_change#; END IF; -- Replaces lbdf deb(DEB_OPEN, 'findDatafileBackup_c'); OPEN findDatafileBackup_c(sourceMask => backupSet_con_t, fno => file#, crescn => crescn, reset_scn => rlgSCN, reset_time => rlgTime, completedAfter => completedAfter, completedBefore => completedBefore, pluginSCN => pluginSCN); -- The following parameters are saved in global variables for use by -- the pre-8.1.6 listGetDatafileBackup procedure. Note that the 8.1.6 -- procedure does NOT use them. Instead, it is expected that the 8.1.6 -- RMAN will use the findValidBackupSet procedures to get a list of the -- valid copies of a backup set. listGetBackupTag := tag; listGetBackupAvailableMask := statusMask; deb(DEB_EXIT); END listTranslateDataFileBackup; ---------------------------- listGetDataFileBackup ---------------------------- PROCEDURE listGetDataFileBackup( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetDataFileBackup'); FETCH findDatafileBackup_c INTO rcvRec; IF (findDatafileBackup_c%NOTFOUND) THEN CLOSE findDatafileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; deb(DEB_EXIT); END listGetDataFileBackup; -- Obsolete as of 8.1.6 ---------------------------- listGetDataFileBackup ---------------------------- FUNCTION listGetDataFileBackup( bs_key OUT number ,backup_type OUT varchar2 ,incremental_level OUT number ,completion_time OUT date ,checkpoint_change# OUT number ,checkpoint_time OUT date) RETURN number IS rcvRec rcvRec_t; valRC binary_integer; validationRec validBackupSetRec_t; BEGIN deb(DEB_ENTER, 'listGetDataFileBackup815'); <<nextRow>> BEGIN listGetDataFileBackup(rcvRec => rcvRec); EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END; valRC := validateBackupSet(backupSetRec => rcvRec, tag => listGetBackupTag, tagMatchRequired => TRUE, checkDeviceIsAllocated => TRUE, availableMask => listGetBackupAvailableMask, validRec => validationRec); IF (valRC <> SUCCESS) THEN GOTO nextRow; END IF; bs_key := rcvRec.bsKey_con; IF (rcvRec.fromSCN_act = 0) THEN backup_type := 'Full'; ELSE backup_type := 'Incremental'; END IF; incremental_level := rcvRec.level_act; completion_time := rcvRec.compTime_con; -- somewhat bogus checkpoint_change# := rcvRec.toSCN_act; checkpoint_time := rcvRec.toTime_act; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; END listGetDataFileBackup; -- A stupid idea from the 8.1 LIST implementation ----------------------------- translateBackupFile ----------------------------- PROCEDURE translateBackupFile( bs_recid IN number ,bs_stamp IN number ,fno IN number ,bskey OUT number ,inclevel OUT number ,backup_type OUT varchar2 ,completed OUT date) IS BEGIN deb(DEB_ENTER, 'translateBackupFile'); -- Rather than running another query to get values we already -- fetched, we simply save the last rcvRec we fetched in a global -- variable. We validate that the record matches -- our input args, and then simply extract the values and return them. -- The 8.1 implementation of LIST should have simply extended those other -- functions to return the values that it required rather than executing -- a 2nd query to fetch them, but that did not happen.
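-- [Illustrative sketch, not part of the original source.] One plausible
-- producer of rcvRec_last is getRecoveryAction (defined later in this
-- package), which caches each action it returns; an 8.1-style caller then
-- hands the same recid/stamp straight back (variables hypothetical):
--
--   IF (getRecoveryAction(rcvRec) = TRUE#) THEN   -- caches rcvRec_last
--     translateBackupFile(bs_recid    => rcvRec.bsRecid_con,
--                         bs_stamp    => rcvRec.bsStamp_con,
--                         fno         => 1,       -- hypothetical file#
--                         bskey       => bskey,
--                         inclevel    => inclevel,
--                         backup_type => backup_type,
--                         completed   => completed);
--   END IF;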
IF (rcvRec_last.type_con <> backupSet_con_t OR rcvRec_last.bsRecid_con <> bs_recid OR rcvRec_last.bsStamp_con <> bs_stamp) THEN deb(DEB_EXIT, 'with error 20204'); raise_application_error(-20204, 'Translation not started'); END IF; bskey := rcvRec_last.bsKey_con; inclevel := rcvRec_last.level_act; completed := rcvRec_last.compTime_con; IF (rcvRec_last.logSequence_obj IS NOT NULL) THEN backup_type := 'Archived Log'; ELSE IF (rcvRec_last.fromSCN_act = 0) THEN backup_type := 'Full'; ELSE backup_type := 'Incremental'; END IF; END IF; deb(DEB_EXIT); END translateBackupFile; -- Used by 8.0 and 8.1.6, but not 8.1 ------------------------ listTranslateArchivedLogBackup ----------------------- PROCEDURE listTranslateArchivedLogBackup( thread# IN number ,sequence# IN number ,first_change# IN number ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired) -- 8.0/8.1 defaults IS currentInc number; BEGIN deb(DEB_ENTER, 'listTranslateArchivedLogBackup'); IF (allIncarnations = TRUE#) THEN currentInc := FALSE#; -- don't care about dbinc_key ELSE currentInc := TRUE#; END IF; deb(DEB_OPEN, 'findArcLogBackup'); OPEN findArcLogBackup(sourcemask => backupSet_con_t, currentIncarnation => currentInc, thread => thread#, sequence => sequence#, lowSCN => first_change#, completedAfter => completedAfter, completedBefore => completedBefore); listGetBackupAvailableMask := statusMask; deb(DEB_EXIT); END listTranslateArchivedLogBackup; --------------------------- listGetArchivedLogBackup -------------------------- PROCEDURE listGetArchivedLogBackup( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN deb(DEB_ENTER, 'listGetArchivedLogBackup'); FETCH findArcLogBackup INTO rcvRec; IF (findArcLogBackup%NOTFOUND) THEN CLOSE findArcLogBackup; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; deb(DEB_EXIT); END listGetArchivedLogBackup; -- Obsolete as of 8.1 --------------------------- listGetArchivedLogBackup -------------------------- FUNCTION listGetArchivedLogBackup( bs_key OUT number ,completion_time OUT date) RETURN number IS rcvRec rcvRec_t; validRec validBackupSetRec_t; valRC binary_integer; BEGIN deb(DEB_ENTER, 'listGetArchivedLogBackup'); <<get_next>> listGetArchivedLogBackup(rcvRec); valRC := validateBackupSet(backupSetRec => rcvRec, checkDeviceIsAllocated => TRUE, availableMask => listGetBackupAvailableMask, validRec => validRec); IF (valRC <> SUCCESS) THEN GOTO get_next; END IF; bs_key := rcvRec.bsKey_con; completion_time := rcvRec.compTime_con; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetArchivedLogBackup; -- Obsolete as of 8.1.6. -- This procedure is just a bad idea left here for backwards compatibility -- with the broken LIST BACKUP OF ARCHIVELOG command of 8.1.
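-- [Illustrative sketch, not part of the original source.] How an 8.1
-- client drove the translate/get pair below (values hypothetical):
--
--   listTranslateArchivedLogBackup(thread# => 1, lowseq => 1,
--                                  highseq => 50);
--   WHILE (listGetArchivedLogBackup(bs_key, thread#, sequence#,
--                                   first_change#, next_change#,
--                                   first_time, next_time) = TRUE#) LOOP
--     NULL;   -- one row per archived log found in a valid backup set
--   END LOOP;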
------------------------ listTranslateArchivedLogBackup ----------------------- PROCEDURE listTranslateArchivedLogBackup( thread# IN number DEFAULT NULL ,lowseq IN number DEFAULT NULL ,highseq IN number DEFAULT NULL ,lowscn IN number DEFAULT NULL ,highscn IN number DEFAULT NULL ,from_time IN date DEFAULT NULL ,until_time IN date DEFAULT NULL ,pattern IN varchar2 DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'listTranslateArchivedLogBackup815'); if lbal2%isopen then close lbal2; end if; deb(DEB_OPEN, 'lbal2'); open lbal2(thread#, lowseq, highseq, lowscn, highscn, from_time, until_time); deb(DEB_EXIT); END listTranslateArchivedLogBackup; -- Obsolete as of 8.1.6 --------------------------- listGetArchivedLogBackup -------------------------- FUNCTION listGetArchivedLogBackup( bs_key OUT number ,thread# OUT number ,sequence# OUT number ,first_change# OUT number ,next_change# OUT number ,first_time OUT date ,next_time OUT date) RETURN number IS rcvRec rcvRec_t; validRec validBackupSetRec_t; BEGIN deb(DEB_ENTER, 'listGetArchivedLogBackup815'); <<get_next>> fetch lbal2 into rcvRec; if lbal2%found then IF (debug) THEN deb(DEB_PRINT, 'listGetArchivedLogBackup: got a backupset:'); printRcvRec(rcvRec); END IF; if validateBackupSet(backupSetRec => rcvRec, checkDeviceIsAllocated => TRUE, availableMask => dbms_rcvman.BSavailable + dbms_rcvman.BSunavailable + dbms_rcvman.BSexpired, validRec => validRec) <> SUCCESS then goto get_next; end if; bs_key := rcvRec.bsKey_con; thread# := rcvRec.logThread_obj; sequence# := rcvRec.logSequence_obj; first_change# := rcvRec.logLowSCN_obj; next_change# := rcvRec.logNextSCN_obj; first_time := rcvRec.logLowTime_obj; next_time := rcvRec.logNextTime_obj; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; else close lbal2; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; end if; END listGetArchivedLogBackup; -------------------- -- List Backupset -- -------------------- PROCEDURE listTranslateBackupsetFiles( bs_key IN number) IS BEGIN IF findBackupsetFiles%ISOPEN THEN CLOSE findBackupsetFiles; END IF; OPEN findBackupsetFiles(bs_key); END; PROCEDURE listGetBackupsetFiles( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN FETCH findBackupsetFiles INTO rcvRec; IF (findBackupsetFiles%NOTFOUND) THEN CLOSE findBackupsetFiles; RAISE no_data_found; END IF; END; ------------------------ -- List All BackupSet -- ------------------------ PROCEDURE translateAllBackupSet( backupType IN binary_integer ,tag IN varchar2 ,statusMask IN binary_integer ,completedAfter IN date ,completedBefore IN date ,onlyrdf IN binary_integer DEFAULT 0) IS BEGIN IF findAllBackupPiece%ISOPEN THEN CLOSE findAllBackupPiece; END IF; OPEN findAllBackupPiece(backupType => backupType ,tag => tag ,statusMask => statusMask ,completedAfter => completedAfter ,completedBefore => completedBefore ,onlyrdf => onlyrdf); END; PROCEDURE getAllBackupSet( rcvRec OUT NOCOPY rcvRec_t) IS BEGIN FETCH findAllBackupPiece INTO rcvRec; IF (findAllBackupPiece%NOTFOUND) THEN CLOSE findAllBackupPiece; RAISE no_data_found; END IF; END; --------------------- -- List Proxy Copy -- --------------------- -- Note that this is used for both datafiles and the controlfile -------------------------- listTranslateProxyDataFile ------------------------- PROCEDURE listTranslateProxyDataFile( file# IN number ,creation_change# IN number ,tag IN varchar2 DEFAULT NULL ,handle_pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired ,liststby IN binary_integer DEFAULT NULL
,pluginSCN IN number DEFAULT 0) IS currentInc number; crescn number; reset_scn number := NULL; reset_time date := NULL; BEGIN deb(DEB_ENTER, 'listTranslateProxyDataFile'); validateState(null); IF (allIncarnations = TRUE#) THEN currentInc := FALSE#; -- don't care about dbinc_key IF (ignoreCreationSCN = TRUE#) THEN -- Since the flag is true, we want to list copies of all -- incarnations of each datafile number. Set crescn to NULL so that -- the query returns all datafilecopies of a particular file#. -- This is used only by RMAN 8.1.5 and prior 8.1 releases. crescn := NULL; ELSE crescn := creation_change#; END IF; ELSE currentInc := TRUE#; crescn := creation_change#; END IF; IF (currentInc = TRUE#) THEN reset_scn := this_reset_scn; reset_time := this_reset_time; END IF; IF (file# = 0) THEN IF (findControlfileBackup_c%ISOPEN) THEN CLOSE findControlfileBackup_c; END IF; -- This replaces lxdf deb(DEB_OPEN, 'findControlfileBackup_c'); OPEN findControlfileBackup_c( sourcemask => proxyCopy_con_t, currentIncarnation => currentInc, tag => tag, pattern => handle_pattern, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask, needstby => liststby); listGetProxyDatafileCursor := 'findControlfileBackup_c'; ELSE IF (findDatafileBackup_c%ISOPEN) THEN CLOSE findDatafileBackup_c; END IF; -- This replaces lxdf OPEN findDatafileBackup_c(sourcemask => proxyCopy_con_t, fno => file#, crescn => crescn, reset_scn => reset_scn, reset_time => reset_time, tag => tag, pattern => handle_pattern, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask, pluginSCN => pluginSCN); listGetProxyDatafileCursor := 'findDatafileBackup_c'; END IF; deb(DEB_EXIT); END listTranslateProxyDataFile; ----------------------------- listGetProxyDataFile ---------------------------- PROCEDURE listGetProxyDataFile( rcvRec OUT NOCOPY rcvRec_t) IS local rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetProxyDataFile'); <<nextRow>> IF (listGetProxyDatafileCursor = 'findControlfileBackup_c') THEN FETCH findControlfileBackup_c INTO local; IF (findControlfileBackup_c%NOTFOUND) THEN CLOSE findControlfileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; ELSIF (listGetProxyDatafileCursor = 'findDatafileBackup_c') THEN FETCH findDatafileBackup_c INTO local; IF (findDatafileBackup_c%NOTFOUND) THEN CLOSE findDatafileBackup_c; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; ELSE deb(DEB_EXIT, 'with error 20204'); raise_application_error(-20204, 'Translation not started'); END IF; -- Proxy copies can be on different device types, so make sure we -- have the right one allocated.
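-- [Illustrative note, not part of the original source.] For example, if
-- only an SBT_TAPE channel is allocated (so anyDevice = FALSE#), a proxy
-- copy recorded with deviceType_con = 'DISK' fails isDeviceTypeAllocated
-- and is skipped by the GOTO below; with anyDevice = TRUE# it would be
-- returned to the caller.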
IF (anyDevice = FALSE# AND isDeviceTypeAllocated(local.deviceType_con) = FALSE#) THEN GOTO nextRow; END IF; rcvRec := local; -- set OUT mode arg deb(DEB_EXIT); END listGetProxyDataFile; -- Obsolete as of 8.1.6 ----------------------------- listGetProxyDataFile ---------------------------- FUNCTION listGetProxyDataFile( xdf_key OUT number ,recid OUT number ,stamp OUT number ,status OUT varchar2 ,handle OUT varchar2 ,completion_time OUT date ,checkpoint_change# OUT number ,checkpoint_time OUT date) RETURN number IS rcvRec rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetProxyDataFile815'); listGetProxyDataFile(rcvRec); xdf_key := rcvRec.key_con; recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; status := rcvRec.status_con; handle := rcvRec.fileName_con; completion_time := rcvRec.compTime_con; checkpoint_change# := rcvRec.toSCN_act; checkpoint_time := rcvRec.toTime_act; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; EXCEPTION WHEN no_data_found THEN deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END listGetProxyDataFile; ----------------------- listTranslateProxyArchivedLog ------------------------ PROCEDURE listTranslateProxyArchivedLog( thread# IN number ,sequence# IN number ,first_change# IN number ,tag IN varchar2 DEFAULT NULL ,handle_pattern IN varchar2 DEFAULT NULL ,completedAfter IN date DEFAULT NULL ,completedBefore IN date DEFAULT NULL ,statusMask IN binary_integer DEFAULT BSavailable+BSunavailable+BSexpired) IS currentIncarnation number; BEGIN deb(DEB_ENTER, 'listTranslateProxyArchivedLog'); IF (allIncarnations = TRUE#) THEN currentIncarnation := FALSE#; -- don't care about dbinc_key ELSE currentIncarnation := TRUE#; END IF; deb(DEB_OPEN, 'findArcLogBackup'); OPEN findArcLogBackup(sourcemask => proxyCopy_con_t, currentIncarnation => currentIncarnation, thread => thread#, sequence => sequence#, lowSCN => first_change#, tag => tag, pattern => handle_pattern, completedAfter => completedAfter, completedBefore => completedBefore, statusMask => statusMask); deb(DEB_EXIT); END listTranslateProxyArchivedLog; --------------------------- listGetProxyArchivedLog -------------------------- PROCEDURE listGetProxyArchivedLog( rcvRec OUT NOCOPY rcvRec_t) IS local rcvRec_t; BEGIN deb(DEB_ENTER, 'listGetProxyArchivedLog'); <<nextRow>> FETCH findArcLogBackup INTO local; IF (findArcLogBackup%NOTFOUND) THEN CLOSE findArcLogBackup; deb(DEB_EXIT, 'with no more records'); RAISE no_data_found; END IF; -- Proxy copies can be on different device types, so make sure we -- have the right one allocated.
IF (anyDevice = FALSE# AND isDeviceTypeAllocated(local.deviceType_con) = FALSE#) THEN GOTO nextRow; END IF; rcvRec := local; -- set OUT mode arg deb(DEB_EXIT); END listGetProxyArchivedLog; ------------------------------- -- List Database Incarnation -- ------------------------------- -------------------------- listTranslateDBIncarnation ------------------------- PROCEDURE listTranslateDBIncarnation( db_name IN varchar2 DEFAULT NULL, all_databases IN number DEFAULT 0) IS BEGIN deb(DEB_ENTER, 'listTranslateDBIncarnation'); IF (ldbi%isopen) THEN CLOSE ldbi; END IF; deb(DEB_OPEN, 'ldbi'); OPEN ldbi(upper(db_name), all_databases); deb(DEB_EXIT); END listTranslateDBIncarnation; ----------------------------- listGetDBIncarnation ---------------------------- FUNCTION listGetDBIncarnation( db_key OUT number ,dbinc_key OUT number ,db_name OUT varchar2 ,db_id OUT number ,current_inc OUT varchar2 ,resetlogs_change# OUT number ,resetlogs_time OUT date ,dbinc_status OUT varchar2) RETURN number IS BEGIN deb(DEB_ENTER, 'listGetDBIncarnation'); FETCH ldbi INTO db_key, dbinc_key, db_name, db_id, current_inc, resetlogs_change#, resetlogs_time, dbinc_status; IF (ldbi%found) THEN deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; ELSE CLOSE ldbi; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END IF; deb(DEB_EXIT); END listGetDBIncarnation; ---------------- listGetDBIncarnation - Pre 10i RMAN calls this fn ------------ FUNCTION listGetDBIncarnation( db_key OUT number ,dbinc_key OUT number ,db_name OUT varchar2 ,db_id OUT number ,current_inc OUT varchar2 ,resetlogs_change# OUT number ,resetlogs_time OUT date) RETURN number IS dbinc_status varchar2(9); BEGIN RETURN listGetDBIncarnation(db_key, dbinc_key, db_name, db_id, current_inc, resetlogs_change#, resetlogs_time, dbinc_status); END listGetDBIncarnation; ------------------------------- -- List Database Site -- ------------------------------- -------------------------- listTranslateDBSite -------------------------------- PROCEDURE listTranslateDBSite( db_name IN varchar2 DEFAULT NULL, alldbs IN binary_integer DEFAULT 1) IS BEGIN deb(DEB_ENTER, 'listTranslateDBSite'); IF (lnni%isopen) THEN CLOSE lnni; END IF; deb(DEB_OPEN, 'lnni'); OPEN lnni(db_name, alldbs); deb(DEB_EXIT); END listTranslateDBSite; ----------------------------- listGetDBSite ----------------------------------- FUNCTION listGetDBSite( db_key OUT number ,db_id OUT number ,db_name OUT varchar2 ,db_role OUT varchar2 ,db_unique_name OUT varchar2) RETURN number IS BEGIN deb(DEB_ENTER, 'listGetDBSite'); FETCH lnni INTO db_key, db_id, db_name, db_role, db_unique_name; deb(DEB_PRINT, 'site name =['||db_unique_name||']'); IF (lnni%found) THEN deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; ELSE CLOSE lnni; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END IF; deb(DEB_EXIT); END listGetDBSite; -------------------------------------- -- List Rollback Segment Tablespace -- -------------------------------------- -------------------------- listRollbackSegTableSpace -------------------------- PROCEDURE listRollbackSegTableSpace IS BEGIN deb(DEB_ENTER, 'listRollbackSegTableSpace'); -- BEGIN_DB_OPEN_ONLY IF (lrtbs%ISOPEN) THEN CLOSE lrtbs; END IF; deb(DEB_OPEN, 'lrtbs'); OPEN lrtbs; -- END_DB_OPEN_ONLY --FIXED_ONLY raise_application_error(-20300, 'Not supported without recovery catalog'); deb(DEB_EXIT); END listRollbackSegTableSpace; ------------------------------ listGetTableSpace ------------------------------ FUNCTION listGetTableSpace( ts# OUT number ,ts_name OUT varchar2) RETURN number IS BEGIN deb(DEB_ENTER, 
'listGetTableSpace'); -- BEGIN_DB_OPEN_ONLY FETCH lrtbs INTO ts#, ts_name; IF (lrtbs%FOUND) THEN deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; ELSE CLOSE lrtbs; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; END IF; -- END_DB_OPEN_ONLY --FIXED_ONLY raise_application_error(-20300, 'Not supported without recovery catalog'); deb(DEB_EXIT); END listGetTableSpace; ------------------------ -- Incremental Backup -- ------------------------ ------------------------------ getIncrementalScn ------------------------------ PROCEDURE getIncrementalScn( first IN boolean -- open the cursor if this is TRUE ,file# IN number ,create_scn IN number ,reset_scn IN number ,reset_time IN date ,incr_level IN number ,cumulative IN number ,rcvRec OUT NOCOPY rcvRec_t ,sourcemask IN number DEFAULT NULL ,tag IN varchar2 DEFAULT NULL ,pluginSCN IN number DEFAULT 0 ,keep IN boolean DEFAULT NULL) IS ilevel number; local rcvRec_t; validRec validBackupSetRec_t; usable_incr rcvRec_t; available_fromSCN_act number; BEGIN deb(DEB_ENTER, 'getIncrementalScn'); IF (incr_level not in (1,2,3,4) OR incr_level is NULL) THEN raise_application_error(-20270, 'invalid incremental level'); END IF; IF (cumulative not in (0,1) OR cumulative is NULL) THEN raise_application_error(-20271, 'invalid cumulative option'); END IF; -- Find the backup with highest checkpoint scn that -- o belongs to the incarnation of datafile -- o matches the given file# -- o is an incremental backup/copy at level N or less if non-cumulative or -- is an incremental backup/copy at level N-1 or less if cumulative -- o belongs to an available backup set if backup -- NOTE: Backups from ancestral incarnations are ignored, even if there is -- a spanning offline range. Incremental backups cannot cross resetlogs -- boundaries. IF (cumulative = TRUE#) THEN ilevel := incr_level - 1; -- Consider only higher level backups ELSE ilevel := incr_level; END IF; -- If this fno was not added by translation, add it now. IF (file# IS NOT NULL) THEN setDfTransClause(fno => file#); END IF; IF first THEN IF (findDatafileBackup_c%ISOPEN) THEN CLOSE findDatafileBackup_c; END IF; getDatafileBackupLast.type_con := NULL; -- clear the last backup record -- hint for caching valid backupset cacheBsRecTable.hint := redundantHint; -- null reset_scn and reset_time means current incarnation OPEN findDatafileBackup_c(sourcemask => sourcemask, fno => file#, crescn => create_scn, reset_scn => nvl(reset_scn, this_reset_scn), reset_time => nvl(reset_time, this_reset_time), level => ilevel, statusMask => BSavailable, tag => tag, onlytc => TRUE#, pluginSCN => pluginSCN); END IF; IF (NOT findDatafileBackup_c%ISOPEN) THEN raise_application_error(-20272, 'cannot take incremental backup'); END IF; LOOP <<nextRow>> FETCH findDatafileBackup_c INTO local; IF (findDatafileBackup_c%NOTFOUND) THEN deb(DEB_PRINT, 'closing cursor'); CLOSE findDatafileBackup_c; cacheBsRecTable.hint := noHint; IF (file# is NOT NULL) THEN -- there were no backups available for this file deb(DEB_EXIT, 'with: cannot take incr backup'); raise_application_error(-20272, 'cannot take incremental backup'); ELSE deb(DEB_EXIT, 'with: no data found'); raise no_data_found; -- no more datafile backups END IF; END IF; -- Non-keep backups should not find keep backups, and keep backups -- should only find keep backups with the same tag. The tag should -- have been provided on the call, so we just need to ensure keep == keep -- This would not be necessary if we used dfBackupHistory_c2 instead -- of findDatafileBackup_c. But I'm not that brave right now.
-- RG
IF (keep IS NOT NULL AND -- NULL means 10g client ((local.keep_options != 0 AND NOT keep) OR -- local_keep and not keep (local.keep_options = 0 AND keep))) THEN -- not local_keep and keep deb(DEB_PRINT, 'Keep does not match for ' || local.key_con || ' completed at ' || to_char(local.compTime_con, 'DD-MON-YY HH24:MI:SS')); GOTO nextRow; -- keep attributes do not match END IF; IF (available_fromSCN_act IS NULL AND getDatafileBackupLast.type_con IS NOT NULL AND getDatafileBackupLast.dfNumber_obj = local.dfNumber_obj) THEN deb(DEB_PRINT, 'already returned incremental scn for file# ' || local.dfNumber_obj); GOTO nextRow; -- this is a duplicate of what we returned earlier END IF; -- Before we actually returned the incremental scn for this datafile, we -- found a record for another datafile, which means we have a hole in the -- incrementals for the file. IF (available_fromSCN_act IS NOT NULL AND (usable_incr.dfNumber_obj <> local.dfNumber_obj OR (usable_incr.dfNumber_obj = local.dfNumber_obj AND usable_incr.dfCreationSCN_obj <> local.dfCreationSCN_obj))) THEN deb(DEB_PRINT, 'no level 0 found for this file# ' || usable_incr.dfNumber_obj || ', creation_scn '|| usable_incr.dfCreationSCN_obj); usable_incr := NULL; available_fromSCN_act := NULL; END IF; IF (CheckRecAction(local) = action_SKIP) THEN deb(DEB_PRINT, 'on orphan branch'); GOTO nextRow; -- this action belongs to orphan branch END IF; IF (usable_incr.dfNumber_obj = local.dfNumber_obj AND usable_incr.dfCreationSCN_obj = local.dfCreationSCN_obj AND local.fromSCN_act > 0 AND available_fromSCN_act < local.fromSCN_act) THEN deb(DEB_PRINT, 'overlapping incremental found for file# ' || usable_incr.dfNumber_obj || ', creation_scn '|| usable_incr.dfCreationSCN_obj); GOTO nextRow; -- overlapping incremental END IF; IF (local.type_con = backupSet_con_t) THEN -- We have to check the validity of the backupset to -- base an incremental backup upon it. IF (validateBackupSet(backupSetRec => local, tag => tag, tagMatchRequired => TRUE, checkDeviceIsAllocated => FALSE, availableMask => BSavailable, validRec => validRec) = dbms_rcvman.UNAVAILABLE) THEN deb(DEB_PRINT, 'incremental is unavailable'); GOTO nextRow; -- can't create an incr based on unavail backup END IF; -- If this is first incremental for this file, then it is possible -- to base the next incremental on this backup as long as there is -- no broken chain of incrementals to level 0 backup. OR -- If this is an incremental that breaks the chain of incrementals to -- level 0, then it is possible to base the next incremental -- on this backup as long as there is no broken chain of incrementals -- to level 0 backup from this backup. IF (available_fromSCN_act IS NULL OR (usable_incr.dfNumber_obj = local.dfNumber_obj AND usable_incr.dfCreationSCN_obj = local.dfCreationSCN_obj AND local.toSCN_act < available_fromSCN_act)) THEN IF (available_fromSCN_act IS NULL) THEN deb(DEB_PRINT, 'available_fromSCN_act set to ' || local.fromSCN_act || ' for file# ' || local.dfNumber_obj || ', creation_scn '|| local.dfCreationSCN_obj); ELSE deb(DEB_PRINT, 'broken chain, available_fromSCN_act set to ' || local.fromSCN_act || ' for file ' || local.dfNumber_obj || ', creation_scn '|| local.dfCreationSCN_obj); END IF; usable_incr := local; available_fromSCN_act := local.fromSCN_act; END IF; END IF; -- remember the last incremental which can be used as base for -- usable_incr. When we find a level 0, then the incremental from scn -- can be used as toSCN of usable_incr backup.
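-- [Illustrative worked example, not part of the original source; SCNs
-- hypothetical.] Rows arrive in descending toSCN order for one file:
--
--   level 1 incr: fromSCN 5000, toSCN 6000 -> becomes usable_incr;
--                                             available_fromSCN_act = 5000
--   level 1 incr: fromSCN 3000, toSCN 5000 -> toSCN >= 5000 and
--                                             fromSCN < 5000, so
--                                             available_fromSCN_act = 3000
--   level 0     : fromSCN 0,    toSCN 3500 -> toSCN >= 3000 and fromSCN 0,
--                                             so available_fromSCN_act = 0:
--                                             chain validated; the next
--                                             incremental starts at 6000
--
-- Had the level 0 stopped at toSCN 2500 instead, the chain would stay
-- unvalidated and would be discarded once a row for another datafile
-- appeared.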
IF (usable_incr.dfNumber_obj = local.dfNumber_obj AND usable_incr.dfCreationSCN_obj = local.dfCreationSCN_obj AND local.toSCN_act >= available_fromSCN_act AND local.fromSCN_act < available_fromSCN_act) THEN available_fromSCN_act := local.fromSCN_act; deb(DEB_PRINT, 'available_fromSCN_act moved to ' || available_fromSCN_act || ' for file# ' || local.dfNumber_obj || ', creation_scn '|| local.dfCreationSCN_obj); END IF; -- we haven't yet seen a level 0 to which the available incrementals can -- be applied. IF (available_fromSCN_act != usable_incr.dfCreationSCN_obj AND available_fromSCN_act > 0) THEN deb(DEB_PRINT, 'need more incrementals to validate chain'); GOTO nextRow; END IF; IF (available_fromSCN_act = usable_incr.dfCreationSCN_obj OR available_fromSCN_act = 0) THEN deb(DEB_PRINT, 'validated incremental to level 0, incremental scn=' || usable_incr.toSCN_act || ' for file ' || usable_incr.dfNumber_obj); rcvRec := usable_incr; ELSE rcvRec := local; deb(DEB_PRINT, 'using level0 proxy/copy, incremental scn=' || local.toSCN_act || ' for file ' || local.dfNumber_obj); END IF; getDatafileBackupLast := rcvRec; -- remember the last record returned deb(DEB_EXIT, 'with: valid record '); EXIT; -- valid record. Create Incremental based on this SCN END LOOP; EXCEPTION WHEN others THEN cacheBsRecTable.hint := noHint; deb(DEB_PRINT, 'caught an exception during getIncrementalScn'); deb(DEB_EXIT, substr(sqlerrm, 1, 512)); raise; END getIncrementalScn; ------------------------------ getIncrementalScn ------------------------------ FUNCTION getIncrementalScn( file# IN number ,create_scn IN number ,reset_scn IN number ,reset_time IN date ,incr_level IN number ,cumulative IN number ,sourcemask IN number DEFAULT NULL ,tag IN varchar2 DEFAULT NULL ,pluginSCN IN number DEFAULT 0) RETURN number IS rcvRec rcvRec_t; BEGIN getIncrementalScn( first => TRUE ,file# => file# ,create_scn => create_scn ,reset_scn => reset_scn ,reset_time => reset_time ,incr_level => incr_level ,cumulative => cumulative ,rcvRec => rcvRec ,sourcemask => sourcemask ,tag => tag ,pluginSCN => pluginSCN); IF (findDatafileBackup_c%ISOPEN) THEN CLOSE findDatafileBackup_c; -- close the one opened in getIncrementalScn END IF; RETURN rcvRec.toSCN_act; END getIncrementalScn; --------------------------------------- -- Recovery Functions and Procedures -- --------------------------------------- ------------------------ setComputeRecoveryActionMasks ------------------------ PROCEDURE setComputeRecoveryActionMasks( containerMask IN number ,actionMask IN number ,allRecords IN number ,availableMask IN binary_integer ,fullBackups IN number DEFAULT NULL) IS BEGIN deb(DEB_ENTER, 'setComputeRecoveryActionMasks'); IF (allRecords = FALSE# AND fullBackups IS NULL) THEN computeRA_fullBackups := 1; ELSE computeRA_fullBackups := fullBackups; END IF; getRA_containerMask := containerMask; getRA_actionMask := actionMask; computeRA_allRecords := allRecords; computeRA_availableMask := availableMask; -- If restoreSource isn't set, then derive a restore source from -- containerMask which is used to filter the unnecessary rows -- returned by rcvRecCursor1_c.
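-- [Illustrative worked example, not part of the original source.] With
-- containerMask = imageCopy_con_t + backupSet_con_t, the derivation below
-- starts restoreSource at proxyCopy_con_t + imageCopy_con_t +
-- backupSet_con_t, finds the proxyCopy_con_t bit absent from
-- containerMask, and subtracts it, leaving image copies and backup sets
-- as the only restore sources. If all three bits were absent,
-- restoreSource would reach 0 and be set back to NULL.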
-- IF (restoreSource IS NULL) THEN restoreSource := proxyCopy_con_t + imageCopy_con_t + backupSet_con_t; IF (bitand(getRA_containerMask, proxyCopy_con_t) = 0) THEN restoreSource := restoreSource - proxyCopy_con_t; END IF; IF (bitand(getRA_containerMask, imageCopy_con_t) = 0) THEN restoreSource := restoreSource - imageCopy_con_t; END IF; IF (bitand(getRA_containerMask, backupSet_con_t) = 0) THEN restoreSource := restoreSource - backupSet_con_t; END IF; IF (restoreSource = 0) THEN restoreSource := NULL; END IF; END IF; deb(DEB_EXIT); END setComputeRecoveryActionMasks; -- Obsolete as of 8.1.7 ------------------------ setComputeRecoveryActionMasks ------------------------ PROCEDURE setComputeRecoveryActionMasks( containerMask IN number ,actionMask IN number ,allRecords IN number) IS BEGIN deb(DEB_ENTER, 'setComputeRecoveryActionMasks816'); setComputeRecoveryActionMasks( containerMask => containerMask, actionMask => actionMask, allRecords => allRecords, availableMask => dbms_rcvman.BSavailable, fullBackups => to_number(null)); deb(DEB_EXIT); END setComputeRecoveryActionMasks; -- Obsolete as of 8.1.6 ---------------------------------- setRAflags --------------------------------- PROCEDURE setRAflags( kindMask IN number ,allRecords IN boolean) IS containerMask number; actionMask number; allRecs number; BEGIN deb(DEB_ENTER, 'setRAflags'); -- Set container mask containerMask := 0; IF (bitand(kindMask, implicitOfflRange + cleanRange + applyOfflRange) > 0) THEN containerMask := containerMask + offlineRangeRec_con_t; END IF; IF (bitand(kindMask, dfCopy) > 0) THEN containerMask := containerMask + imageCopy_con_t; END IF; IF (bitand(kindMask, buSet + applyIncremental) > 0) THEN containerMask := containerMask + backupSet_con_t; END IF; IF (bitand(kindMask, proxyFull) > 0) THEN containerMask := containerMask + proxyCopy_con_t; END IF; -- Set Action Mask actionMask := 0; IF (bitand(kindMask, dfCopy + ProxyFull + buSet) > 0) THEN actionMask := actionMask + full_act_t; END IF; IF (bitand(kindMask, applyIncremental) > 0) THEN actionMask := actionMask + incremental_act_t; END IF; IF (bitand(kindMask, redo) > 0) THEN actionMask := actionMask + redo_act_t; END IF; IF (bitand(kindMask, implicitOfflRange) > 0) THEN actionMask := actionMask + implicitRange_act_t; END IF; IF (bitand(kindMask, cleanRange) > 0) THEN actionMask := actionMask + cleanRange_act_t; END IF; IF (bitand(kindMask, applyOfflRange) > 0) THEN actionMask := actionMask + offlineRange_act_t; END IF; IF (allRecords) THEN allRecs := TRUE#; ELSE allRecs := FALSE#; END IF; deb(DEB_PRINT, 'setRAflags kindMask=' || to_char(kindMask) || ' containerMask=' || to_char(containerMask) || ' actionMask=' || to_char(actionMask)); setComputeRecoveryActionMasks(containerMask, actionMask, allRecs); deb(DEB_EXIT); END setRAflags; ------------------------------ getRecoveryAction ------------------------------ FUNCTION getRecoveryAction( action OUT NOCOPY rcvRec_t) RETURN binary_integer IS redoAction rcvRec_t; local rcvRec_t; top rcvRec_t; BEGIN deb(DEB_ENTER, 'getRecoveryAction'); <<getNext>> IF (rcvRecStack.count = 0) THEN -- Signal to krmk we have reached the end. -- We usually only get here if filtering, in which case we cannot -- always predict when we have reached the last row. -- -- pre-8.1.3 rman does not tell us to filter out any records (via -- the RA flags), so we should not get here. If we do then it is -- a protocol error, so we really should signal an error. -- However, there is no way to signal an internal error from a -- package.
-- So just raise the no-data-found condition. -- This will cause the pre-8.1.3 RMAN to signal an error because -- it does not tolerate any errors from getRecoveryAction. deb(DEB_EXIT, 'with no more records'); raise no_data_found; END IF; rcvRecPop(local); -- See if we want this action kind or not IF (not isValidAction(local)) THEN IF (debug) THEN printRcvRec(local); END IF; goto getNext; END IF; <<merge_actions>> IF (rcvRecStack.count > 0) THEN -------------------------------------------------------- -- See if the next action can be merged with this one -- -------------------------------------------------------- rcvRecTop(top); IF (local.type_act = redo_act_t AND top.type_act = redo_act_t) THEN -- Two contiguous redo actions can always be merged. We know they -- must be from the same incarnation because an offline range -- action always appears between 2 different incarnations. redoAction := local; rcvRecPop(local); local.fromSCN_act := redoAction.fromSCN_act; GOTO merge_actions; END IF; action := local; rcvRec_last := local; deb(DEB_EXIT, 'with: TRUE#'); RETURN TRUE#; -- more actions to return yet ELSE action := local; rcvRec_last := local; deb(DEB_EXIT, 'with: FALSE#'); RETURN FALSE#; -- this is the last action END IF; END getRecoveryAction; -- Obsolete as of 8.1.6 ------------------------------ getRecoveryAction ------------------------------ FUNCTION getRecoveryAction( kind OUT number ,set_stamp OUT number ,set_count OUT number ,recid OUT number ,stamp OUT number ,fname OUT varchar2 ,blocksize OUT number ,blocks OUT number ,devtype OUT varchar2 ,from_scn OUT number ,to_scn OUT number ,to_time OUT date ,rlgscn OUT number ,rlgtime OUT date ,cfcretime OUT date ,dbinc_key OUT number) RETURN binary_integer IS rcvRec rcvRec_t; rc binary_integer; BEGIN deb(DEB_ENTER, 'getRecoveryAction815'); rc := getRecoveryAction(rcvRec); IF (rcvRec.type_con = offlineRangeRec_con_t) THEN IF (rcvRec.type_act = offlineRange_act_t) THEN kind := applyOfflRange; ELSIF (rcvRec.type_act = cleanRange_act_t) THEN kind := cleanRange; ELSIF (rcvRec.type_act = implicitRange_act_t) THEN kind := implicitOfflRange; ELSE deb(DEB_PRINT, 'cannot convert type_con=' || to_char(rcvRec.type_con) || ' type_act=' || to_char(rcvRec.type_act) || ' to recovery action kind'); deb(DEB_EXIT, 'with error 20999'); raise_application_error(-20999, 'internal error: getRecoveryAction'); END IF; ELSIF (rcvRec.type_con = backupSet_con_t) THEN IF (rcvRec.type_act = full_act_t) THEN kind := buSet; ELSE kind := applyIncremental; END IF; ELSIF (rcvRec.type_con = proxyCopy_con_t) THEN kind := proxyFull; ELSIF (rcvRec.type_con = imageCopy_con_t) THEN kind := dfCopy; ELSIF (rcvRec.type_con IS NULL) THEN IF (rcvRec.type_act = redo_act_t) THEN kind := redo; END IF; END IF; deb(DEB_PRINT, 'getRecoveryAction: kind=' || nvl(to_char(kind), 'null')); rcvRecConvert(rcvRec); -- get rid of nulls IF (debug) THEN printRcvRec(rcvRec); END IF; set_stamp := rcvRec.setStamp_con; set_count := rcvRec.setCount_con; IF (rcvRec.type_con = backupSet_con_t) THEN recid := rcvRec.bsRecid_con; stamp := rcvRec.bsStamp_con; ELSE recid := rcvRec.recid_con; stamp := rcvRec.stamp_con; END IF; fname := rcvRec.fileName_con; blocksize := rcvRec.blockSize_con; blocks := rcvRec.blocks_con; devtype := rcvRec.deviceType_con; from_scn := rcvRec.fromSCN_act; to_scn := rcvRec.toSCN_act; to_time := rcvRec.toTime_act; -- null OK rlgscn := rcvRec.rlgSCN_act; rlgtime := rcvRec.rlgTime_act; cfcretime := rcvRec.cfCreationTime_con; -- null OK dbinc_key := rcvRec.dbincKey_act; -- null OK deb(DEB_EXIT, 'with: 
'||to_char(rc)); RETURN rc; END; ----------------------------- printRecoveryActions ---------------------------- PROCEDURE printRecoveryActions IS action rcvRec_t; rc number; BEGIN IF (not debug) THEN return; END IF; deb(DEB_PRINT, '===== ' || to_char(rcvRecStack.count) || ' actions stacked ====='); IF (rcvRecStack.count = 0) THEN return; END IF; LOOP rc := getRecoveryAction(action); printRcvRec(action); EXIT WHEN rc = FALSE#; END LOOP; END printRecoveryActions; ----------------------------- trimRecoveryActions ----------------------------- PROCEDURE trimRecoveryActions( maxActions IN number ,containerMask IN number ,actionMask IN number) IS n number; BEGIN deb(DEB_ENTER, 'trimRecoveryActions[procedure]'); n := trimRecoveryActions(maxActions, containerMask, actionMask); deb(DEB_PRINT, 'trimRecoveryActions[procedure] returned '||n); deb(DEB_EXIT); END trimRecoveryActions; --------------------- -- Report Obsolete -- --------------------- ----------------------------- reportTranslateDFDel ---------------------------- PROCEDURE reportTranslateDFDel IS BEGIN deb(DEB_ENTER, 'reportTranslateDFDel'); IF (rddf%isopen) THEN CLOSE rddf; END IF; deb(DEB_OPEN, 'rddf'); OPEN rddf; deb(DEB_EXIT); END reportTranslateDFDel; -- pre-8.1.5 version. Discards unused out variables and PROXY rows. -------------------------------- reportGetDFDel ------------------------------- FUNCTION reportGetDFDel( file# OUT number ,filetype OUT number ,checkpoint_change# OUT number ,checkpoint_time OUT date ,resetlogs_change# OUT number ,resetlogs_time OUT date ,incremental_change# OUT number ,fuzzy_change# OUT number ,recid OUT number ,stamp OUT number ,fname OUT varchar2 ,restorable OUT number) RETURN number IS rc number; mytype number; key number; completion_time date; BEGIN deb(DEB_ENTER, 'reportGetDFDel80'); <<get_next>> rc := reportGetDFDel( file# ,mytype ,checkpoint_change# ,checkpoint_time ,resetlogs_change# ,resetlogs_time ,incremental_change# ,fuzzy_change# ,recid ,stamp ,fname ,restorable ,key ,completion_time); IF (rc = TRUE#) THEN IF (mytype = PROXY) THEN GOTO get_next; END IF; filetype := mytype; END IF; deb(DEB_EXIT, 'with: '||to_char(rc)); RETURN rc; END reportGetDFDel; ------------------------- -- RMAN Configuration --- ------------------------- -------------------------------- getConfig ------------------------------------ PROCEDURE getConfig( conf# OUT number ,name IN OUT varchar2 ,value IN OUT varchar2 ,first IN boolean) IS eof boolean := FALSE; conf_exist number := 0; primary_db_unique_name varchar2(30); BEGIN IF (first) THEN IF (findConfig_c%ISOPEN) THEN CLOSE findConfig_c; END IF; OPEN cntConfig_c; FETCH cntConfig_c INTO conf_exist; CLOSE cntConfig_c; IF conf_exist > 0 THEN IF user_db_unique_name is not null THEN deb(DEB_PRINT, 'getConfig: configurations exist for user site'); OPEN findConfig_c(name, value, user_db_unique_name); ELSE deb(DEB_PRINT, 'getConfig: configurations exist for this site'); OPEN findConfig_c(name, value, this_db_unique_name); END IF; ELSE OPEN getPrimarySite_c; FETCH getPrimarySite_c INTO primary_db_unique_name; IF getPrimarySite_c%NOTFOUND THEN deb(DEB_PRINT, 'getConfig: no/multiple primary/site conf'); CLOSE getPrimarySite_c; RAISE no_data_found; END IF; CLOSE getPrimarySite_c; deb(DEB_PRINT, 'getConfig: using primary configurations'); OPEN findConfig_c(name, value, primary_db_unique_name); END IF; END IF; FETCH findConfig_c INTO conf#, name, value; IF (findConfig_c%NOTFOUND) THEN eof := TRUE; CLOSE findConfig_c; END IF; IF (eof) THEN --- if end of fetch RAISE no_data_found; END IF; END
getConfig; -------------------------------------- -- Add corruption table to BMR list -- -------------------------------------- ----------------------------- bmrAddCorruptTable ------------------------------ PROCEDURE bmrAddCorruptTable( dfnumber OUT number ,blknumber OUT number ,range OUT number ,first IN boolean) IS eof boolean := FALSE; BEGIN IF (first) THEN IF (translateDatabaseCorruption_c%ISOPEN) THEN CLOSE translateDatabaseCorruption_c; END IF; OPEN translateDatabaseCorruption_c(dfnumber => NULL); END IF; FETCH translateDatabaseCorruption_c INTO dfnumber, blknumber, range; IF (translateDatabaseCorruption_c%NOTFOUND) THEN eof := TRUE; CLOSE translateDatabaseCorruption_c; END IF; IF (eof) THEN --- if end of fetch RAISE no_data_found; END IF; END bmrAddCorruptTable; ------------------ -- Version Info -- ------------------ -- Return all the protocol versions that we support, one at a time. -- Return them in ascending version number order. ------------------------------ getPackageVersion ------------------------------ FUNCTION getPackageVersion RETURN varchar2 IS BEGIN deb(DEB_ENTER, 'getPackageVersion'); IF (versionCounter > versionMaxIndex) THEN versionCounter := 1; deb(DEB_EXIT, 'with: NULL'); RETURN NULL; END IF; versionCounter := versionCounter + 1; deb(DEB_EXIT, 'with: '||versionList(versionCounter - 1)); RETURN versionList(versionCounter - 1); END getPackageVersion; FUNCTION isStatusMatch(status IN VARCHAR2, mask IN NUMBER) RETURN NUMBER IS BEGIN -- BSpartial_avail is a backupset validation mask and NOT a backuppiece -- filter. For example, BSpartial_avail + BSavailable + BSexpired means -- select 'A' and 'X' pieces but validate the pieces in such a way that a -- partial backupset succeeds. (see findValidBackupSet_c cursor) IF (bitand(mask, BSavailable) != 0 AND status = 'A') OR (bitand(mask, BSunavailable) != 0 AND status = 'U') OR (bitand(mask, BSdeleted) != 0 AND status = 'D') OR (bitand(mask, BSexpired) != 0 AND status = 'X') THEN RETURN TRUE#; ELSE RETURN FALSE#; END IF; END isStatusMatch; ----------------------------- isBackupTypeMatch ------------------------------- FUNCTION isBackupTypeMatch(btype IN VARCHAR2, mask IN binary_integer) RETURN NUMBER IS BEGIN IF (bitand(mask, BSdatafile_full) !=0 AND btype = 'D') OR (bitand(mask, BSdatafile_incr) !=0 AND btype = 'I') OR (bitand(mask, BSarchivelog) !=0 AND btype = 'L') THEN RETURN TRUE#; ELSE RETURN FALSE#; END IF; END isBackupTypeMatch; ----------------------------- setRcvRecBackupAge ------------------------------ PROCEDURE setRcvRecBackupAge(age IN number) IS BEGIN rcvRecBackupAge := age; deb(DEB_PRINT, 'rcvRecBackupAge= ' || rcvRecBackupAge); resetthisBackupAge; END setRcvRecBackupAge; ----------------------------- resetthisBackupAge ------------------------------ PROCEDURE resetthisBackupAge IS BEGIN thisBackupAge := 0; deb(DEB_PRINT, 'thisBackupAge= ' || thisBackupAge); END resetthisBackupAge; --------------------------------- printLbRec --------------------------------- PROCEDURE printLbRec( lbRec IN lbRec_t) IS BEGIN deb(DEB_ENTER, 'printLbRec'); deb(DEB_IN, 'fetch backup_type: '||lbRec.backup_type); deb(DEB_IN, ' file_type: '||lbRec.file_type); deb(DEB_IN, ' pkey: '||lbRec.pkey); deb(DEB_IN, ' recid: '||lbRec.recid); deb(DEB_IN, ' stamp: '||lbRec.stamp); deb(DEB_IN, ' is_rdf: '||lbRec.is_rdf); IF (lbRec.file_type = datafile_txt) THEN deb(DEB_IN, ' df_file#: '||lbRec.df_file#); deb(DEB_IN, ' df_creation_change#:'||lbRec.df_creation_change#); deb(DEB_IN, ' df_checkpoint_change#:'||lbRec.df_checkpoint_change#);
----------------------------- setRcvRecBackupAge ------------------------------

PROCEDURE setRcvRecBackupAge(age IN number) IS
BEGIN
  rcvRecBackupAge := age;
  deb(DEB_PRINT, 'rcvRecBackupAge= ' || rcvRecBackupAge);
  resetthisBackupAge;
END setRcvRecBackupAge;

----------------------------- resetthisBackupAge ------------------------------

PROCEDURE resetthisBackupAge IS
BEGIN
  thisBackupAge := 0;
  deb(DEB_PRINT, 'thisBackupAge= ' || thisBackupAge);
END resetthisBackupAge;

--------------------------------- printLbRec ---------------------------------

PROCEDURE printLbRec(lbRec IN lbRec_t) IS
BEGIN
  deb(DEB_ENTER, 'printLbRec');
  deb(DEB_IN, 'fetch backup_type: '||lbRec.backup_type);
  deb(DEB_IN, ' file_type: '||lbRec.file_type);
  deb(DEB_IN, ' pkey: '||lbRec.pkey);
  deb(DEB_IN, ' recid: '||lbRec.recid);
  deb(DEB_IN, ' stamp: '||lbRec.stamp);
  deb(DEB_IN, ' is_rdf: '||lbRec.is_rdf);
  IF (lbRec.file_type = datafile_txt) THEN
    deb(DEB_IN, ' df_file#: '||lbRec.df_file#);
    deb(DEB_IN, ' df_creation_change#:'||lbRec.df_creation_change#);
    deb(DEB_IN, ' df_checkpoint_change#:'||lbRec.df_checkpoint_change#);
    deb(DEB_IN, ' df_incremental_change#:'||
        nvl(to_char(lbRec.df_incremental_change#), 'NULL'));
  END IF;
  IF (lbRec.file_type = archivedlog_txt) THEN
    deb(DEB_IN, ' rl_thread#: '||lbRec.rl_thread#);
    deb(DEB_IN, ' rl_sequence#: '||lbRec.rl_sequence#);
    deb(DEB_IN, ' rl_next_change#:'||lbRec.rl_next_change#);
  END IF;
  IF (lbRec.backup_type = backupset_txt) THEN
    deb(DEB_IN, ' bs_key: '||lbRec.bs_key);
    deb(DEB_IN, ' bs_stamp: '||lbRec.bs_stamp);
    deb(DEB_IN, ' bs_count: '||lbRec.bs_count);
    deb(DEB_IN, ' bs_incr_type: '||lbRec.bs_incr_type);
  END IF;
  IF (lbRec.file_type = piece_txt) THEN
    deb(DEB_IN, ' bp_piece#: '||lbRec.bp_piece#);
    deb(DEB_IN, ' bp_copy#: '||lbRec.bp_copy#);
    deb(DEB_IN, ' status: '||lbRec.status);
    deb(DEB_IN, ' device_type: '||lbRec.device_type);
    deb(DEB_IN, ' tag: '||lbRec.tag);
  END IF;
  deb(DEB_EXIT, 'ok');
EXCEPTION
  WHEN OTHERS THEN
    deb(DEB_EXIT, 'with exception: '||substr(sqlerrm, 1, 512));
    RETURN;
END printLbRec;

------------------------------- listBackupInMKS -------------------------------
-- Is datafile checkpoint_change# greater than must keep scn?

FUNCTION listBackupInMKS(lbDfRecTabUs IN lbDfRecTab_t
                        ,lbRec        IN lbRec_t
                        ,maxDfNumber  IN number
                        ,forIncr      IN boolean)
RETURN BOOLEAN IS
  i          number;
  min_scn    number;
  min_rlgscn number;
BEGIN
  -- The table lbDfRecTabUs is indexed by file#, so there has to be at
  -- least one slot with index N*file# in the lbDfRecTab with the file
  -- from lbRec.
  i := lbRec.df_file#;
  LOOP
    IF (NOT lbDfRecTabUs.exists(i)) THEN
      RETURN FALSE;
    END IF;
    IF (lbDfRecTabUs(i).dfRec.dfNumber = lbRec.df_file# AND
        (lbRec.df_file# = 0 OR
         lbDfRecTabUs(i).dfRec.dfCreationSCN =
           lbRec.df_creation_change#)) THEN
      IF (forIncr) THEN
        min_scn    := lbDfRecTabUs(i).incrmin_scn;
        min_rlgscn := lbDfRecTabUs(i).incrmin_rlgscn;
      ELSE
        min_scn    := lbDfRecTabUs(i).fullmin_scn;
        min_rlgscn := lbDfRecTabUs(i).fullmin_rlgscn;
      END IF;
      IF (min_scn < lbRec.df_checkpoint_change# AND
          (min_rlgscn IS NULL OR
           min_rlgscn = lbRec.df_resetlogs_change# OR
           min_scn <= lbRec.df_resetlogs_change#)) THEN
        RETURN TRUE;
      ELSE
        RETURN FALSE;
      END IF;
    END IF;
    i := i + maxDfNumber;
  END LOOP;
  RETURN FALSE;
END listBackupInMKS;
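-- Illustrative sketch of the probing scheme used by listBackupInMKS (the
-- numbers are hypothetical): with maxDfNumber = 100, entries for datafile
-- 7 live at indexes 7, 107, 207, ..., one slot per (file#, creation SCN)
-- pair. If file 7 was dropped and recreated, slot 7 might hold one
-- incarnation of the file and slot 107 the other; the loop probes i,
-- i+100, i+200, ... until the creation SCN matches or a missing slot ends
-- the scan.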
----------------------------- listBackupProcessPiece --------------------------

PROCEDURE listBackupProcessPiece(lbRec    IN lbRec_t
                                ,lbRecOut OUT NOCOPY lbRec_t
                                ,lbState  IN OUT NOCOPY lbState_t) IS
BEGIN
  IF (debug) THEN -- protect for performance
    deb(DEB_ENTER, 'listBackupProcessPiece');
  END IF;
  --
  -- Do the filtering here for backuppiece
  --
  -- 1. Requested only recovery area files.
  -- 2. Specified device type.
  --
  IF (anyDevice = TRUE# OR
      isDeviceTypeAllocated(lbRec.device_type) = TRUE#) THEN
    IF (recoveryDestFile and lbRec.is_rdf = 'NO') THEN
      IF (debug) THEN -- protect for performance
        deb(DEB_IN, 'file_type: ' || lbRec.file_type ||
            ' not a recovery area file pkey: ' || lbRec.pkey);
      END IF;
    ELSE
      -- The variable lbCopyCount contains the maximum copy number.
      IF (lbRec.bp_copy# > lbState.lbCopyCount) THEN
        lbState.lbCopyCount := lbRec.bp_copy#;
      END IF;
      -- The table lbPieceCountTab contains the number of pieces for each
      -- copy.
      BEGIN
        lbState.lbPieceCountTab(lbRec.bp_copy#-1) :=
          lbState.lbPieceCountTab(lbRec.bp_copy#-1) + 1;
      EXCEPTION
        WHEN no_data_found THEN -- lbPieceCountTab(i) uninitialized
          lbState.lbPieceCountTab(lbRec.bp_copy#-1) := 1;
          -- Because lbPieceCountTab(i) is uninitialized, this is a new
          -- copy, so add it to the total number of copies.
          lbState.lbRecCmn.bs_copies := lbState.lbRecCmn.bs_copies + 1;
      END;
      --
      -- Determine the status of the backup set.
      IF (lbState.lbRecCmn.bs_status is NULL) THEN
        lbState.lbRecCmn.bs_status := lbRec.status;
      ELSIF (lbRec.status <> lbState.lbRecCmn.bs_status) THEN
        -- The status of this piece differs from the status of the other
        -- pieces, so change the status of the backup set to 'OTHER'.
        lbState.lbRecCmn.bs_status := other_txt;
      END IF;
      --
      -- Update bytes. Note that lbRecCmn.bs_bytes represents the total
      -- number of bytes including all copies. Later on, we will divide
      -- this by the number of copies.
      lbState.lbRecCmn.bs_bytes := lbState.lbRecCmn.bs_bytes + lbRec.bytes;
      --
      -- Process device_type.
      IF (lbState.lbRecCmn.bs_device_type is NULL) THEN
        lbState.lbRecCmn.bs_device_type := lbRec.device_type;
      ELSIF (instr(lbState.lbRecCmn.bs_device_type,
                   lbRec.device_type) = 0) THEN
        BEGIN
          lbState.lbRecCmn.bs_device_type :=
            lbState.lbRecCmn.bs_device_type||','||lbRec.device_type;
        EXCEPTION
          WHEN value_error THEN
            deb(DEB_IN, 'dev buffer overflow length=' ||
                lengthb(lbState.lbRecCmn.bs_device_type));
        END;
      END IF;
      --
      -- Process compressed.
      IF (lbState.lbRecCmn.bs_compressed is NULL) THEN
        lbState.lbRecCmn.bs_compressed := lbRec.compressed;
      ELSIF (lbState.lbRecCmn.bs_compressed != lbRec.compressed) THEN
        lbState.lbRecCmn.bs_compressed := '###';
      END IF;
      --
      -- Process tag.
      IF (lbRec.tag IS NOT NULL) THEN
        IF (lbState.lbRecCmn.bs_tag is NULL) THEN
          lbState.lbRecCmn.bs_tag := lbRec.tag;
        ELSIF (instr(lbState.lbRecCmn.bs_tag, lbRec.tag) = 0) THEN
          BEGIN
            lbState.lbRecCmn.bs_tag :=
              lbState.lbRecCmn.bs_tag||','||lbRec.tag;
          EXCEPTION
            WHEN value_error THEN
              deb(DEB_IN, 'tag buffer overflow length=' ||
                  lengthb(lbState.lbRecCmn.bs_tag));
          END;
        END IF;
      END IF;
    END IF;
  ELSE
    IF (debug) THEN
      deb(DEB_IN, 'device type not allocated');
    END IF;
  END IF;
  IF (debug) THEN -- protect for performance
    deb(DEB_EXIT, 'OK');
  END IF;
END listBackupProcessPiece;

PROCEDURE setNeedObsoleteData(NeedObsoleteData IN boolean DEFAULT TRUE) IS
BEGIN
  IF NeedObsoleteData THEN
    lb_NeedObsoleteData := TRUE#;
  ELSE
    lb_NeedObsoleteData := FALSE#;
  END IF;
END;

--------------------------------- listBackup ----------------------------------

FUNCTION listBackup(lbRecOut      OUT NOCOPY lbRec_t
                   ,firstCall     IN boolean
                   ,only_obsolete IN boolean
                   ,redundancy    IN number
                   ,piped_call    IN boolean -- not called by RMAN client
                   ,lbCursor      IN OUT NOCOPY lbCursor_t
                   ,lbState       IN OUT NOCOPY lbState_t
                   ,extRlKeepSCN  IN number DEFAULT NULL)
RETURN boolean IS
  lbRec                lbRec_t;
  null_lbRec           lbRec_t := NULL;
  i                    binary_integer;
  j                    binary_integer;
  tmp                  binary_integer;
  rc                   binary_integer;
  found                boolean;
  lbCursor_notfound    boolean := FALSE;
  numBackups           number;
  oldest_flashback_scn number;
  fullBackups          number;
  actionMask           number;
  -- The following variables are used for shorthand
  full_df_backup       boolean;
  incr_df_backup       boolean;
  arc_log_backup       boolean;
  keep                 varchar2(3);
  keep_until           date;
  -- The following variables are used for computeRecoveryAction.
  save_dbinc_key       number;
  save_reset_scn       number;
  dfRec                dfRec_t;
  rcvRec               rcvRec_t;
  rcvRecStack_count    binary_integer;
  extendMask           binary_integer;
BEGIN
  --
  -- The function has the following algorithm:
  -- 0. If this is the first call to the function (firstCall is TRUE), we
  --    will reset all global variables used by this function.
  -- 1. The function also lists obsolete backups. So, when firstCall is
  --    TRUE, we will prepare everything needed for reporting obsolete
  --    backup files:
  --    1.1. Create the list of database files which existed after untilSCN
  --         and store it in the lbDfRecTabUs collection table.
  --    1.2. Call computeRecoveryAction and add non-redundant actions to the
  --         must-keep list.
--         If the action has keep attributes but is needed
--         to satisfy the retention policy, then add it to the must-keep
--         list.
--    1.3. For each file from lbDfRecTabUs calculate fullmin_scn,
--         incrmin_scn, logmin_scn and their corresponding rlgscn. All full
--         backups newer than fullmin_scn with resetlogs_change# equal to
--         fullmin_rlgscn or newer than fullmin_scn should be kept.
--         Similarly, incremental backups use incrmin_scn, incrmin_rlgscn
--         and archived logs use logmin_scn, logmin_rlgscn.
--         We will also calculate lbRlKeepSCN as min(logmin_scn)
--         which says that all archived logs newer than lbRlKeepSCN should
--         be kept. Similarly, lbRlKeepRlgSCN as min(logmin_rlgscn).
-- 2. Open cursor listBackup_c. This cursor lists all backup sets, backup
--    datafiles, pieces, copies and proxy copies.
-- 3. If lbRecOutTab is empty, then fetch a row from cursor listBackup_c
--    into lbRec and do the following:
--    3.1. If the cursor returned %NOTFOUND, or the lbRec type is backupset
--         or copy and the variables lbObsoleteRetention and lbObsoleteKeep
--         are not FALSE, then move all data from lbRecTmpTab into
--         lbRecOutTab.
--    3.2. If the cursor returns %NOTFOUND, then go to step 4.
--    3.3. Then we calculate whether this backup is obsolete or not:
--         3.3.1. For shorthand, we determine the type of backup.
--                It can be a full datafile backup or an incremental
--                datafile backup. We also save the keep attributes of the
--                backup.
--         3.3.2. If this backup has keep attributes, then check keep_until.
--                If the until time is not expired, then it is not obsolete.
--                If the until time is expired and the backup is in the must
--                keep list or its checkpoint_change# is greater than the
--                must keep scn, then this is not an obsolete backup.
--         3.3.3. If this is a full backup and does not have keep
--                attributes, then it is not obsolete only if it is outside
--                the recovery window and in the must-keep list or its
--                checkpoint_change# is greater than the must keep scn.
--                Also, if the backup is inside the recovery window, then it
--                is not obsolete.
--         3.3.4. If this is an incremental backup and its checkpoint change
--                is greater than incrmin_scn and its resetlogs_change# is
--                greater than incrmin_scn or equal to incrmin_rlgscn,
--                then this backup file can not be obsolete. So set
--                lbObsoleteRetention to FALSE.
--         3.3.5. If lbRec is an archived log or its backup and next_change
--                is greater than lbRlKeepSCN and its resetlogs_change# is
--                greater than lbRlKeepSCN or equal to lbRlKeepRlgSCN,
--                then this backup file can not be obsolete. If its
--                next_time is greater than lbFbUntilTime or its next_change
--                is greater than lbMinGrsp, then this backup file cannot
--                be obsolete. So set lbObsoleteRetention to FALSE.
--    3.4. If both lbObsoleteRetention and lbObsoleteKeep are FALSE, then
--         this backup file is not obsolete, so just return TRUE.
--    3.5. If lbRec.file_type is backup datafile, then find the
--         corresponding file name and put it into lbRec.fname.
--    3.6. If lbRec.backup_type is backup set then do the following:
--         3.6.1. If lbRec.file_type is a backup set, then delete the
--                collection table lbRecTmpTab and reset the piece counters.
--                The lbRec should contain all meta-data about this backup
--                set, so we will store that data into lbRecCmn.
--         3.6.2. If lbRec.file_type is a piece, then verify availability of
--                the piece and update lbRecCmn.status. Also, increase
--                lbPieceCountTab which is used to determine whether all
--                pieces are in the backup set. Add lbRec to the lbRecTmpTab
--                collection table.
--         3.6.3.
--                If lbRec.file_type is datafile/controlfile/spfile,
--                then just add it to the lbRecTmpTab collection table.
--    3.7. If lbRec.backup_type is some kind of a copy, then add it to the
--         lbRecOutTab.
-- 4. If lbRecOutTab is not empty, then return the next element from
--    lbRecOutTab and decrease lbRecOutTab_count.
-- 5. If we reached the end of listBackup_c and lbRecOutTab is empty, then
--    return FALSE. Otherwise, return TRUE.
--
  IF (debug) THEN -- protect for performance
    deb(DEB_ENTER, 'listBackup');
  END IF;
  -- Init the output record.
  lbRecOut := NULL;
  IF (firstCall) THEN
    IF (debug) THEN -- protect for performance
      deb(DEB_IN, 'FIRST');
    END IF;
    -- Database and its incarnation must be set.
    validateState(NULL);
    -- Always work with all incarnations.
    setAllIncarnations(TRUE);
    -- ListBackup is introduced in 10i. So, we can apply redo from a
    -- non-current incarnation. This will make the v$obsolete_backup_files
    -- and recovery area algorithms honor redo from a non-current
    -- incarnation.
    --
    setCanApplyAnyRedo(TRUE);
    setCanHandleTransportableTbs(TRUE);
    -- Hint cacheBsRecTable that redundant cache is what we need
    cacheBsRecTable.hint := redundantHint;
    -- Init lbRecOutTab
    lbState.lbRecOutTab.delete;
    lbState.lbRecOutTab_count := 0;
    -- Init lbRecTmpTab
    lbState.lbRecTmpTab.delete;
    -- lbDfRecTabUs is a table which contains all possible datafiles
    -- created. Uninitialize the table to start with.
    lbState.lbDfRecTabUs.delete;
    -- Init lbDfRecTab
    lbState.lbDfRecTab.delete;
    -- lbPieceCountTab is a table which contains the number of pieces for
    -- each copy number. Uninitialize the table to start with. It is
    -- initialized when exists returns FALSE or when the no_data_found
    -- exception is raised.
    lbState.lbPieceCountTab.delete;
    lbState.lbCopyCount := 0;
    -- Init lbMkTab
    lbState.lbMkTab.delete;
    -- Init lbMkITab
    lbState.lbMkITab.delete;
    -- We need to know the current time.
    -- Removed TO_DATE(SYSDATE) for 2 reasons. 1) bug# 3216912
    -- 2) unnecessary
    SELECT SYSDATE INTO lbState.lbNowTime from dual;
    -- Get the list of database files which existed since untilSCN and
    -- store it in the lbDfRecTabUs collection table. The first element
    -- will be the control file, which always has a creation SCN of 0.
    -- In case the recovery window spans a resetlogs, we also have to get
    -- files which existed only in some of the parent incarnations. So, we
    -- will call getParentIncarnation until we reach a resetlogs before
    -- untilSCN, and we call translateDatabase for each parent incarnation.
    dfRec.dfNumber      := 0;
    dfRec.dfCreationSCN := 0;
    dfRec.inBackup      := 1;
    dfRec.foreignDbid   := 0;
    dfRec.pluggedRonly  := 0;
    dfRec.pluginSCN     := 0;
    lbState.lbDfRecTabUs(0).dfRec          := dfRec;
    lbState.lbDfRecTabUs(0).fullmin_scn    := MAXSCNVAL;
    lbState.lbDfRecTabUs(0).fullmin_rlgscn := MAXSCNVAL;
    lbState.lbDfRecTabUs(0).incrmin_scn    := MAXSCNVAL;
    lbState.lbDfRecTabUs(0).incrmin_rlgscn := MAXSCNVAL;
    lbState.lbDfRecTabUs(0).logmin_scn     := MAXSCNVAL;
    lbState.lbDfRecTabUs(0).logmin_rlgscn  := MAXSCNVAL;
    IF lb_NeedObsoleteData = TRUE# THEN
      lbState.lbNeedObsoleteData := TRUE;
    ELSE
      lbState.lbNeedObsoleteData := FALSE;
      deb(DEB_PRINT,'listBackup:caller not interested in Obsolete Data');
    END IF;
    -- save original this_dbinc_key and this_reset_scn
    save_dbinc_key := this_dbinc_key;
    save_reset_scn := this_reset_scn;
    -- Get the max number of files. This number will be used for indexing.
    lbState.lbMaxDfNumber := getMaxDfNumber;
<<loop_travelinc>>
    LOOP
      translateDatabase(TRUE#);
<<loop_genDfRecTab>>
      LOOP
        BEGIN
          getDatafile(dfRec);
        EXCEPTION
          WHEN no_data_found THEN
            EXIT loop_genDfRecTab;
        END;
        -- Now, add the file to the lbDfRecTabUs collection table.
        -- We will add the file only if the file is unique. The table
        -- lbDfRecTabUs is indexed by the corresponding file number, so the
        -- file with number X can be in position X, X+lbMaxDfNumber,
        -- X+2*lbMaxDfNumber, and so on.
        j := dfRec.dfNumber;
<<loop_scanDfRecTab>>
        LOOP
          BEGIN
            -- In case dfRecTab(j) is not occupied, the no_data_found
            -- exception will be raised and we will fill up dfRecTab(j).
            -- On the other hand, dfRecTab(j) might be occupied with
            -- useless data. So, we will check if dfRecTab(j) has a valid
            -- dfNumber. If the file number is bogus we will raise
            -- no_data_found.
            IF (lbState.lbDfRecTabUs(j).dfRec.dfNumber = 0) THEN
              RAISE no_data_found;
            END IF;
            IF (dfRec.dfNumber = lbState.lbDfRecTabUs(j).dfRec.dfNumber AND
                dfRec.dfCreationSCN =
                  lbState.lbDfRecTabUs(j).dfRec.dfCreationSCN) THEN
              -- This is a duplicate entry, so nothing to do.
              EXIT loop_scanDfRecTab;
            ELSE
              -- In case this is not the same datafile (creation time
              -- differs), increase index j and go on.
              j := j + lbState.lbMaxDfNumber;
            END IF;
          EXCEPTION
            WHEN no_data_found THEN
              lbState.lbDfRecTabUs(j).dfRec          := dfRec;
              lbState.lbDfRecTabUs(j).fullmin_scn    := MAXSCNVAL;
              lbState.lbDfRecTabUs(j).fullmin_rlgscn := MAXSCNVAL;
              lbState.lbDfRecTabUs(j).logmin_scn     := MAXSCNVAL;
              lbState.lbDfRecTabUs(j).logmin_rlgscn  := MAXSCNVAL;
              lbState.lbDfRecTabUs(j).incrmin_scn    := MAXSCNVAL;
              lbState.lbDfRecTabUs(j).incrmin_rlgscn := MAXSCNVAL;
          END;
        END LOOP;
      END LOOP;
      -- Here, we are done with this incarnation.
      -- In case untilSCN is before the resetlogs at which this incarnation
      -- was branched, go to the parent incarnation and get all the
      -- files from that incarnation newer than untilSCN.
      IF (untilSCN IS NOT NULL AND untilSCN < this_reset_scn) THEN
        rc := getParentIncarnation(this_dbinc_key, this_reset_scn);
        EXIT loop_travelinc WHEN rc = FALSE#;
      ELSE
        EXIT loop_travelinc;
      END IF;
    END LOOP;
    -- restore original this_dbinc_key and this_reset_scn
    this_dbinc_key := save_dbinc_key;
    this_reset_scn := save_reset_scn;
    -- initialize keep scn and time to maximum values if not driven by user
    getFlashbackInfo(lbState.lbFbUntilTime, lbState.lbMinGrsp);
    IF (debug) THEN
      deb(DEB_IN, 'lbFbUntilTime= ' || to_char(lbState.lbFbUntilTime) ||
          ' lbMinGrsp=' || to_char(lbState.lbMinGrsp));
    END IF;
    lbState.lbRlKeepRlgSCN := MAXSCNVAL;
    lbState.lbRlKeepSCN := MAXSCNVAL;
    IF (extRlKeepSCN IS NOT NULL) THEN
      lbState.lbRlKeepSCN := extRlKeepSCN;
      lbState.lbRlKeepRlgSCN := getPointInTimeInc(extRlKeepSCN);
      IF (debug) THEN
        deb(DEB_IN, 'Extending lbRlKeepSCN for external keepscn to '||
            to_char(lbState.lbRlKeepSCN));
        deb(DEB_IN, 'Extending lbRlKeepRlgSCN for external keepscn to '||
            to_char(lbState.lbRlKeepRlgSCN));
      END IF;
    END IF;
    -- Include incremental backupsets in the recovery window obsolete
    -- algorithm to skip unnecessary incremental backups and archivelogs.
    actionMask := full_act_t + offlineRange_act_t + implicitRange_act_t +
                  cleanRange_act_t;
    IF (untilTime IS NOT NULL OR untilSCN IS NOT NULL) THEN
      actionMask := actionMask + incremental_act_t;
    END IF;
    -- Set the compute recovery masks to return available backup sets,
    -- copies and proxy copies. We need only full backups and offline
    -- ranges (normal, implicit, or clean).
    setComputeRecoveryActionMasks(
      containerMask => backupSet_con_t + proxyCopy_con_t + imageCopy_con_t +
                       offlineRangeRec_con_t,
      actionMask    => actionMask,
      allRecords    => TRUE#,
      availableMask => BSavailable,
      fullBackups   => redundancy);
    -- Set the craGetAllCfBackups flag to TRUE.
    -- By setting this flag to TRUE, compute recovery action will also
    -- return controlfile backups from previous incarnations.
    setCraGetAllCfBackups(TRUE);
    -- No need to call computeRecoveryAction when the caller is not
    -- interested in the obsolete column value.
    --
    IF NOT lbState.lbNeedObsoleteData THEN
      goto ObsoleteDataSkip;
    END IF;
    -- For each file in the list call computeRecoveryAction to get a stack
    -- of recovery actions. All full backups from the stack are added to
    -- lbMkTab (Must Keep TABle). We will also calculate fullmin_scn,
    -- incrmin_scn, logmin_scn and their corresponding rlgscn. Also,
    -- lbRlKeepSCN and lbRlKeepRlgSCN for archived logs are calculated.
    -- All archived logs after lbRlKeepSCN with resetlogs_change# equal to
    -- lbRlKeepRlgSCN or greater than lbRlKeepSCN are kept. We will also
    -- keep all archivelogs that are needed for a guaranteed restore point.
    -- We will also keep the archived log backups whose next_change# is
    -- greater than or equal to lbMinGrsp, but not the archived copies.
    -- This is because flashback to GRP1 followed by flashback to GRP2
    -- (where GRP2 > GRP1) will require archivelogs outside the range
    -- listed by the grsp table (from_scn - to_scn column).
    --
    -- The algorithm does not delete archived logs which are older than the
    -- checkpoint of the oldest must-keep control file backup because, in
    -- case you restore that control file, it is possible that recovery
    -- will start from the control file checkpoint.
    --
    -- We can assume that clean2scn is infinite because a clean offline
    -- range which is older than the checkpoint of the oldest must-keep
    -- control file backup will have no influence on the deletion of
    -- archived logs.
    --
    i := to_number(null);
    LOOP
      IF (i is null) THEN
        i := lbState.lbDfRecTabUs.first;
      ELSE
        i := lbState.lbDfRecTabUs.next(i);
      END IF;
      EXIT WHEN i IS NULL;
      dfRec := lbState.lbDfRecTabUs(i).dfRec;
      -- Call computeRecoveryActions. We can safely use the offline range
      -- from the offr because offline ranges are used only to determine
      -- whether some archived logs are obsolete or not, and the algorithm
      -- always keeps all archived logs after the checkpoint of the oldest
      -- must-keep control file backup. So in case the oldest must-keep
      -- control file backup is before this implicit range, the range will
      -- be ignored.
      rc := computeRecoveryActions(fno           => dfRec.dfNumber,
                                   crescn        => dfRec.dfCreationSCN,
                                   allowfuzzy    => FALSE,
                                   partial_rcv   => FALSE,
                                   allCopies     => FALSE,
                                   cleanscn      => dfRec.stopSCN,
                                   clean2scn     => 281474976710655,
                                   clean2time    => lbState.lbNowTime,
                                   onlscn        => dfRec.dfOnlineSCN,
                                   offlscn       => dfRec.dfOfflineSCN,
                                   onltime       => dfRec.dfOnlineTime,
                                   rmanCmd       => obsoleteCmd_t,
                                   foreignDbid   => dfRec.foreignDbid,
                                   pluggedRonly  => dfRec.pluggedRonly,
                                   pluginSCN     => dfRec.pluginSCN,
                                   pluginRlgSCN  => dfRec.pluginRlgSCN,
                                   pluginRlgTime => dfRec.pluginRlgTime,
                                   creation_thread => dfRec.creation_thread,
                                   creation_size   => dfRec.creation_size);
      numBackups := 0;
      fullBackups := getRecFullCount;
      LOOP
        EXIT WHEN getRecStackCount = 0;
        rcvRecPop(rcvRec);
        IF (rcvRec.type_act = full_act_t) THEN
          IF (rcvRec.keep_options = KEEP_NO)       -- nokeep backup
          THEN
            addBackupToMKL(lbState.lbMkTab, rcvRec);
            extendKeepSCN(lbState.lbDfRecTabUs(i), rcvRec.toSCN_act,
                          rcvRec.rlgSCN_act, extendAllSCN, FALSE,
                          'extendFullBackup (NoKeep)');
            numBackups := numBackups+1;  -- bump the number of backups
          ELSIF (NVL(rcvRec.keep_until,MAXDATEVAL) > lbState.lbNowTime)
                                                   -- not expired?
          THEN
            -- In case this is a keep-logs backup of a datafile,
            -- then decrease logmin_scn and incrmin_scn if needed.
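            -- (Hedged example: a KEEP LOGS backup completed at SCN 5000
            -- whose KEEP UNTIL time has not yet passed lowers the file's
            -- logmin_scn/incrmin_scn watermarks toward 5000 via
            -- extendKeepSCN below, so the archived logs and incrementals
            -- needed to roll that backup forward stay non-obsolete.)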
            IF (rcvRec.keep_options = KEEP_LOGS)   -- keep logs?
            THEN
              extendKeepSCN(lbState.lbDfRecTabUs(i), rcvRec.toSCN_act,
                            rcvRec.rlgSCN_act,
                            extendLogSCN + extendIncrSCN, FALSE,
                            'extendFullBackup (Keep)');
            END IF;
          END IF;
        ELSIF (rcvRec.type_act = offlineRange_act_t OR
               rcvRec.type_act = cleanRange_act_t OR
               rcvRec.type_act = implicitRange_act_t) THEN
          extendMask := 0;
          IF (lbState.lbDfRecTabUs(i).fullmin_scn = rcvRec.fromSCN_act) THEN
            extendMask := extendMask + extendFullSCN;
          END IF;
          IF (lbState.lbDfRecTabUs(i).incrmin_scn = rcvRec.fromSCN_act) THEN
            extendMask := extendMask + extendIncrSCN;
          END IF;
          IF (lbState.lbDfRecTabUs(i).logmin_scn = rcvRec.fromSCN_act) THEN
            extendMask := extendMask + extendLogSCN;
          END IF;
          extendKeepSCN(lbState.lbDfRecTabUs(i), rcvRec.toSCN_act,
                        rcvRec.rlgSCN_act, extendMask, TRUE,
                        'extendMiscRange');
        ELSIF (rcvRec.type_act = incremental_act_t) THEN
          IF (lbState.lbDfRecTabUs(i).incrmin_scn >= rcvRec.fromSCN_act AND
              lbState.lbDfRecTabUs(i).incrmin_scn < rcvRec.toSCN_act) THEN
            addBackupToMKL(lbState.lbMkITab, rcvRec);
            extendKeepSCN(lbState.lbDfRecTabUs(i), rcvRec.toSCN_act,
                          rcvRec.rlgSCN_act, extendIncrSCN, TRUE,
                          'extendIncrBackup');
          END IF;
        END IF;
      END LOOP;
      -- Bug-3736736: If this is an excluded datafile do not decrease
      -- lbDfkeepSCNTab(i) to the dfCreationSCN. This is to prevent
      -- excluded datafiles with no backups from keeping all the
      -- archivelogs since their creation.
      -- A plugged-readonly file doesn't bump the keep scn or the minimum
      -- resetlogs scn, as it is a foreign file and doesn't require any
      -- redo to recover it.
      IF ((lbState.lbDfRecTabUs(i).fullmin_scn = MAXSCNVAL OR  -- no bkp
           numBackups < redundancy) AND  -- not enough backups
          dfRec.inBackup = 1 AND         -- file not excluded
          dfRec.pluggedRonly = 0)        -- not a plugged readonly
      THEN
        extendKeepSCN(lbState.lbDfRecTabUs(i), dfRec.dfCreationSCN,
                      getPointInTimeInc(dfRec.dfCreationSCN),
                      extendAllSCN, TRUE, 'extendNoBackup');
      END IF;
      -- If this is a datafile and its logmin_scn is newer than
      -- lbRlKeepSCN, then we have to decrease lbRlKeepSCN and
      -- lbRlKeepRlgSCN.
      IF (lbState.lbRlKeepSCN > lbState.lbDfRecTabUs(i).logmin_scn) THEN
        lbState.lbRlKeepSCN := lbState.lbDfRecTabUs(i).logmin_scn;
        lbState.lbRlKeepRlgSCN := lbState.lbDfRecTabUs(i).logmin_rlgscn;
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'Extending lbRlKeepSCN to '||
              to_char(lbState.lbRlKeepSCN));
          deb(DEB_IN, 'Extending lbRlKeepRlgSCN to '||
              nvl(to_char(lbState.lbRlKeepRlgSCN), 'null'));
        END IF;
      END IF;
    END LOOP;
    -- Add 'redundancy' number of full backups of the SPFILE.
    LOOP
      BEGIN
        -- add to Must Keep TABle
        IF (getSpfileBackup(rcvRec => rcvRec, redundancy => redundancy,
                            rmanCmd => obsoleteCmd_t) = SUCCESS) THEN
          addBackupToMKL(lbState.lbMkTab, rcvRec);
        ELSE
          EXIT;
        END IF;
      EXCEPTION
        WHEN no_data_found THEN
          EXIT;
      END;
    END LOOP;
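    -- Hedged sketch of the extendMask arithmetic above: assuming the
    -- extendFullSCN, extendIncrSCN and extendLogSCN constants (declared
    -- elsewhere in this package, with extendAllSCN as their sum) are
    -- disjoint bit flags, an offline range starting exactly at both
    -- fullmin_scn and logmin_scn, but not at incrmin_scn, yields
    -- extendMask = extendFullSCN + extendLogSCN, so extendKeepSCN moves
    -- only those two watermarks across the range to rcvRec.toSCN_act.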
<<ObsoleteDataSkip>>
    -- Get the list of all database files which ever existed and store them
    -- in the collection table lbDfRecTab.
    translateAllDatafile;
    LOOP
      BEGIN
        getDatafile(dfRec);
      EXCEPTION
        WHEN no_data_found THEN
          EXIT;
      END;
      lbState.lbDfRecTab(lbState.lbDfRecTab.count) := dfRec;
    END LOOP;
    -- Open the main cursor which returns one row per backup set, backup
    -- piece, backup file and copy.
    IF (piped_call) THEN
      openLbCursor(lbCursor);
    ELSE
      IF (listBackup_c%ISOPEN) THEN
        CLOSE listBackup_c;
      END IF;
      OPEN listBackup_c;
    END IF;
    -- Assume that this backupset or copy will *not* be obsoleted to start
    -- with. This is because the first record (by record timestamp) we see
    -- can be a v$backup_piece, v$backup_datafile or v$backup_redolog
    -- record, since the v$backup_set record section can get reused before
    -- other dependent sections. We don't mark those backups as obsolete.
    --
    lbState.lbObsoleteRetention := FALSE;
    lbState.lbKeepForDbpitr := FALSE;
    -- Assume that this backupset or copy will *not* be obsoleted because
    -- of its keep attributes.
    lbState.lbObsoleteKeep := FALSE;
    -- Reset collection table
    lbState.lbRecTmpTab.delete;
    lbState.lbCopyCount := 0;
    lbState.lbPieceCountTab.delete;
    lbState.lbRecCmn := null_lbRec;
    IF (debug) THEN
      deb(DEB_IN, 'Must Keep List:');
      i := lbState.lbMkTab.first;
      LOOP
        EXIT WHEN i IS NULL;
        FOR j in 0..lbState.lbMkTab(i).count-1 LOOP
          IF (lbState.lbMkTab(i)(j).type_con = backupSet_con_t) THEN
            deb(DEB_PRINT, 'Backup Set bskey=' ||
                lbState.lbMkTab(i)(j).bsKey_con ||
                ' set_stamp=' || lbState.lbMkTab(i)(j).setStamp_con ||
                ' set_count=' || lbState.lbMkTab(i)(j).setCount_con);
          ELSIF (lbState.lbMkTab(i)(j).type_con = imageCopy_con_t) THEN
            deb(DEB_PRINT, 'Datafile Copy key=' ||
                lbState.lbMkTab(i)(j).key_con ||
                ' recid=' || lbState.lbMkTab(i)(j).recid_con ||
                ' stamp=' || lbState.lbMkTab(i)(j).stamp_con);
          ELSIF (lbState.lbMkTab(i)(j).type_con = proxyCopy_con_t) THEN
            deb(DEB_PRINT, 'Proxy Backup key=' ||
                lbState.lbMkTab(i)(j).key_con ||
                ' recid=' || lbState.lbMkTab(i)(j).recid_con ||
                ' stamp=' || lbState.lbMkTab(i)(j).stamp_con);
          ELSE
            deb(DEB_PRINT, 'Unknown Type=' ||
                lbState.lbMkTab(i)(j).type_con);
          END IF;
        END LOOP;
        i := lbState.lbMkTab.next(i);
      END LOOP;
    END IF;
    -- no more redundant hint as we fetched all the files needed in the
    -- must keep list
    cacheBsRecTable.hint := noHint;
  END IF; -- first call
<<main_loop>>
  WHILE (lbState.lbRecOutTab_count = 0) -- while there is nothing to return
  LOOP
    IF (piped_call) THEN
      FETCH lbCursor INTO lbRec;
      IF (lbCursor%NOTFOUND) THEN
        lbCursor_notfound := TRUE;
      END IF;
    ELSE
      FETCH listBackup_c INTO lbRec;
      IF (listBackup_c%NOTFOUND) THEN
        lbCursor_notfound := TRUE;
      END IF;
    END IF;
    IF (lbCursor_notfound OR
        (lbRec.file_type = backupset_txt) OR
        (lbRec.backup_type IN (copy_txt, proxycopy_txt)) OR
        (lbRec.backup_type = backupset_txt AND
         lbRec.file_type <> backupset_txt AND
         (lbState.lbRecCmn.recid IS NULL OR
          lbState.lbRecCmn.bs_stamp <> lbRec.bs_stamp OR
          lbState.lbRecCmn.bs_count <> lbRec.bs_count))) THEN
      -- If lbRec.file_type is not a piece or datafile, then this is a row
      -- which contains information about the next backup set (type of
      -- backup set, how many pieces and similar) or the row is a copy. So,
      -- if the lbRecTmpTab count is not zero, we should move the previous
      -- backupset, which is stored in lbRecTmpTab, into lbRecOutTab. Of
      -- course, we will do this only if lbCopyCount is not zero. This is
      -- because in no-catalog mode a backupset does not get physically
      -- deleted -- it is deleted *only* when all pieces are deleted.
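      -- Sketch of this flush-on-boundary pattern (row order hypothetical):
      -- the cursor might return the BS#10 header, then its pieces and
      -- datafiles, then the BS#11 header. The BS#10 rows accumulate in
      -- lbRecTmpTab; only when the BS#11 header (or a copy row, or
      -- %NOTFOUND) arrives is BS#10 stamped with its aggregated
      -- status/bytes/obsolete values and moved into lbRecOutTab.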
      IF (lbState.lbRecTmpTab.count > 0 AND lbState.lbCopyCount > 0 AND
          (NOT only_obsolete OR lbState.lbObsoleteRetention OR
           lbState.lbObsoleteKeep)) THEN
        -- The backupset is obsolete if either lbObsoleteRetention or
        -- lbObsoleteKeep is TRUE. Populate the value only if the caller
        -- wants obsolete data.
        IF lbState.lbNeedObsoleteData THEN
          IF (lbState.lbObsoleteRetention OR lbState.lbObsoleteKeep) THEN
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'E: Obsolete!!!');
            END IF;
            lbState.lbRecCmn.obsolete := 'YES';
            lbState.lbRecCmn.keep_for_dbpitr := 'NO';
          ELSE
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'E: Not obsolete!!!');
            END IF;
            lbState.lbRecCmn.obsolete := 'NO';
            IF (lbState.lbKeepForDbpitr) THEN
              lbState.lbRecCmn.keep_for_dbpitr := 'YES';
            ELSE
              lbState.lbRecCmn.keep_for_dbpitr := 'NO';
            END IF;
          END IF;
        END IF;
        -- If the number of pieces does not match, then modify the status
        -- to OTHER.
        IF (lbState.lbRecCmn.status <> other_txt) THEN
<<loop_copy>>
          FOR i IN 0..lbState.lbCopyCount-1 LOOP
            BEGIN
              IF (lbState.lbRecCmn.bs_pieces !=
                  lbState.lbPieceCountTab(i)) THEN
                lbState.lbRecCmn.status := other_txt;
                EXIT loop_copy;
              END IF;
            EXCEPTION
              WHEN no_data_found THEN -- lbPieceCountTab(i) uninitialized
                EXIT loop_copy;
            END;
          END LOOP;
        END IF;
        -- Dump all rows which are stored in lbRecTmpTab.
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'pipelining backup set '||lbState.lbRecCmn.bs_key);
        END IF;
        FOR i IN 0..lbState.lbRecTmpTab.count-1 LOOP
          -- Move the row into lbRecOutTab.
          IF (recoveryDestFile AND
              lbState.lbRecTmpTab(i).is_rdf = 'NO') THEN
            -- this file doesn't exist on disk or is not a recovery area
            -- file
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'not a recovery area file type');
            END IF;
          ELSIF (anyDevice = FALSE# AND -- not all devices are allocated
                 lbState.lbRecTmpTab(i).file_type = piece_txt AND
                 isDevicetypeAllocated(lbState.lbRecTmpTab(i).device_type)
                   = FALSE#) THEN
            -- The backupset is obsolete because there exists a copy on
            -- the device specified by the user. But this piece is not
            -- obsolete because it exists on a different device.
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'device type not allocated');
            END IF;
          ELSE
            -- Before moving the row into lbRecOutTab, update it with the
            -- common parts shared with all backup files and pieces
            -- belonging to the backup set.
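            -- (Worked example, numbers hypothetical: for a backup set with
            -- bs_copies = 2 whose pieces sum to bs_bytes = 4 GB across
            -- both copies, the assignment below reports 4 GB / 2 = 2 GB
            -- per copy; and if one copy is missing a piece, the loop above
            -- has already downgraded the status to OTHER.)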
            tmp := lbState.lbRecOutTab_count;
            lbState.lbRecOutTab(tmp) := lbState.lbRecTmpTab(i);
            lbState.lbRecOutTab(tmp).obsolete := lbState.lbRecCmn.obsolete;
            lbState.lbRecOutTab(tmp).keep_for_dbpitr :=
              lbState.lbRecCmn.keep_for_dbpitr;
            lbState.lbRecOutTab(tmp).bs_status :=
              lbState.lbRecCmn.bs_status;
            lbState.lbRecOutTab(tmp).bs_copies :=
              lbState.lbRecCmn.bs_copies;
            lbState.lbRecOutTab(tmp).bs_bytes :=
              lbState.lbRecCmn.bs_bytes / lbState.lbRecCmn.bs_copies;
            lbState.lbRecOutTab(tmp).bs_compressed :=
              lbState.lbRecCmn.bs_compressed;
            lbState.lbRecOutTab(tmp).bs_tag := lbState.lbRecCmn.bs_tag;
            lbState.lbRecOutTab(tmp).bs_device_type :=
              lbState.lbRecCmn.bs_device_type;
            IF (lbState.lbRecCmn.recid IS NOT NULL) THEN
              -- The following fields can only come from file_type
              -- backupset. So, fill those fields only if we have a valid
              -- lbRecCmn.
              lbState.lbRecOutTab(tmp).backup_type :=
                lbState.lbRecCmn.backup_type;
              lbState.lbRecOutTab(tmp).keep := lbState.lbRecCmn.keep;
              lbState.lbRecOutTab(tmp).keep_options :=
                lbState.lbRecCmn.keep_options;
              lbState.lbRecOutTab(tmp).keep_until :=
                lbState.lbRecCmn.keep_until;
              lbState.lbRecOutTab(tmp).bs_key := lbState.lbRecCmn.bs_key;
              lbState.lbRecOutTab(tmp).bs_count :=
                lbState.lbRecCmn.bs_count;
              lbState.lbRecOutTab(tmp).bs_stamp :=
                lbState.lbRecCmn.bs_stamp;
              lbState.lbRecOutTab(tmp).bs_type := lbState.lbRecCmn.bs_type;
              lbState.lbRecOutTab(tmp).bs_incr_type :=
                lbState.lbRecCmn.bs_incr_type;
              lbState.lbRecOutTab(tmp).bs_pieces :=
                lbState.lbRecCmn.bs_pieces;
              lbState.lbRecOutTab(tmp).bs_completion_time :=
                lbState.lbRecCmn.bs_completion_time;
            END IF;
            lbState.lbRecOutTab_count := tmp + 1;
          END IF;
        END LOOP;
      END IF;
      -- If there is no more data, exit the loop.
      IF (lbCursor_notfound) THEN
        exit main_loop;
      END IF;
      -- If we are here because of missing dependent sections of the
      -- backupset (like v$backup_set got reused before v$backup_piece/
      -- v$backup_redolog/v$backup_datafile), then assume that this
      -- backupset isn't obsolete.
      -- If we are here because of a copy, backupset, or proxy record, then
      -- assume that this backupset or copy will be obsolete.
      IF (lbRec.file_type <> backupset_txt AND
          lbRec.backup_type = backupset_txt) THEN
        IF (debug) THEN
          deb(DEB_IN, 'setting lbObsoleteRetention to FALSE');
        END IF;
        lbState.lbObsoleteRetention := FALSE;
      ELSE
        IF (debug) THEN
          deb(DEB_IN, 'setting lbObsoleteRetention to TRUE');
        END IF;
        lbState.lbObsoleteRetention := TRUE;
      END IF;
      -- Assume that this backupset or copy will *not* be obsoleted because
      -- of its keep attributes.
      lbState.lbObsoleteKeep := FALSE;
      lbState.lbKeepForDbpitr := FALSE;
      -- Reset collection table
      lbState.lbRecTmpTab.delete;
      lbState.lbCopyCount := 0;
      lbState.lbPieceCountTab.delete;
      lbState.lbRecCmn := null_lbRec;
    END IF;
    IF (debug) THEN -- protect for performance
      printLbRec(lbRec);
      -- Check to see if the backupset, piece and file_type have the same
      -- setStamp and setCount.
      IF (lbRec.backup_type = backupset_txt AND
          lbRec.file_type <> backupset_txt) THEN
        IF ((lbState.lbRecCmn.bs_stamp = lbRec.bs_stamp AND
             lbState.lbRecCmn.bs_count = lbRec.bs_count) OR
            lbState.lbRecCmn.bs_key = lbRec.bs_key) THEN
          deb(DEB_IN, 'bs->bp->bdf/bsf/brl same bs');
        ELSE
          deb(DEB_IN, 'bs->bp->bdf/bsf/brl **no** bs (or) **not** same bs');
        END IF;
      END IF;
    END IF;
    -- Check the keep_until time and the must keep list to decide whether
    -- this is an obsolete backup or not. On the other hand, if the backup
    -- does not have keep options, then check against the must keep list,
    -- the inside-must-keep scn, lbFbUntilTime, lbRlKeepSCN and
    -- lbRlKeepRlgSCN.
    IF lbState.lbNeedObsoleteData THEN
      lbRec.obsolete := 'YES'; -- assume that the backup is obsolete
      lbRec.keep_for_dbpitr := 'NO';
    END IF;
    IF (lbState.lbObsoleteRetention AND NOT lbState.lbObsoleteKeep AND
        lbState.lbNeedObsoleteData) THEN
      -- For shorthand, decide what type of datafile backup this is.
      full_df_backup := FALSE;
      incr_df_backup := FALSE;
      arc_log_backup := FALSE;
      IF (lbRec.backup_type = backupset_txt) THEN
        IF (lbRec.file_type = archivedlog_txt) THEN
          arc_log_backup := TRUE;
        ELSIF (lbRec.file_type IN (spfile_txt, controlfile_txt)) THEN
          full_df_backup := TRUE;
        ELSIF (lbRec.file_type = datafile_txt) THEN
          IF (lbRec.df_incremental_change# = lbRec.df_creation_change# OR
              lbRec.bs_incr_type = full_txt) THEN
            full_df_backup := TRUE;
          ELSIF (lbRec.bs_incr_type <> full_txt) THEN
            incr_df_backup := TRUE;
          END IF;
        END IF;
      ELSIF (lbRec.backup_type IN (copy_txt, proxycopy_txt) AND
             lbRec.file_type IN (datafile_txt, controlfile_txt)) THEN
        full_df_backup := TRUE;
      END IF;
      -- Get the keep attributes.
      keep := NVL(lbRec.keep, 'NO');
      keep_until := NVL(lbRec.keep_until, MAXDATEVAL);
      IF ((full_df_backup OR arc_log_backup OR incr_df_backup) AND
          lbRec.backup_type = backupset_txt) THEN
        keep := NVL(lbState.lbRecCmn.keep, 'NO');
        keep_until := NVL(lbState.lbRecCmn.keep_until, MAXDATEVAL);
      END IF;
      -- Compare keep attributes with current.
      IF (keep = 'YES') THEN
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'Keep backup until ' || keep_until ||
              ' - Checking ...');
        END IF;
        IF (keep_until < lbState.lbNowTime) THEN
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'expired -> obsolete by keep');
          END IF;
          lbState.lbObsoleteKeep := TRUE;
        ELSE
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'not expired -> no obsolete');
          END IF;
          lbState.lbObsoleteRetention := FALSE;
        END IF;
      END IF;
      -- Check out backup attributes with current.
      IF (full_df_backup) THEN
        IF (keep != 'YES') THEN -- nokeep backup
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'Full backup - Checking ...');
          END IF;
          IF ((lbRec.file_type = spfile_txt AND
               lbRec.df_ckp_mod_time <     -- it is SPFILE outside RW
                 NVL(untilTime, MAXDATEVAL)) OR
              (lbRec.file_type <> spfile_txt AND -- other file outside RW
               ((untilTime IS NULL AND untilSCN IS NULL) OR
                lbRec.df_ckp_mod_time < untilTime OR
                (untilTime IS NULL AND
                 lbRec.df_checkpoint_change# <= untilSCN)))) THEN
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'nokeep backup outside RW');
            END IF;
            IF (listBackupInMKL(lbState.lbMkTab, lbRec)) THEN
              IF (debug) THEN -- protect for performance
                deb(DEB_IN, 'inside MKL -> no obsolete');
                deb(DEB_IN, 'keep_for_dbpitr = YES');
              END IF;
              lbState.lbObsoleteRetention := FALSE;
              lbState.lbKeepForDbpitr := TRUE;
            ELSIF (listBackupInMKS(lbState.lbDfRecTabUs, lbRec,
                                   lbState.lbMaxDfNumber, FALSE)) THEN
              -- This full backup is not in the must keep list and may be
              -- in an orphan incarnation. It has to be kept only when
              -- its checkpoint_change# is greater than the keep list,
              -- ignoring its status (ie unavailable/expired).
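              -- (Hedged example of this MKS test: if fullmin_scn for the
              -- file is SCN 1000, a full backup with checkpoint_change#
              -- 1200 in a matching incarnation is retained even when
              -- expired or unavailable, while one checkpointed at SCN 900
              -- stays obsolete.)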
              IF (debug) THEN -- protect for performance
                deb(DEB_IN, 'inside MKS -> no obsolete');
              END IF;
              lbState.lbObsoleteRetention := FALSE;
              lbState.lbKeepForDbpitr := FALSE;
            END IF;
          ELSE
            -- inside Recovery Win, not obsolete
            IF (debug) THEN -- protect for performance
              deb(DEB_IN, 'nokeep backup: inside RW -> no obsolete');
            END IF;
            lbState.lbObsoleteRetention := FALSE;
          END IF;
        END IF;
      ELSIF (incr_df_backup) THEN
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'Incremental backup - Checking ...');
        END IF;
        IF (listBackupInMKL(lbState.lbMkITab, lbRec)) THEN
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'inside MKL -> no obsolete');
            deb(DEB_IN, 'keep_for_dbpitr = YES');
          END IF;
          lbState.lbObsoleteRetention := FALSE;
          lbState.lbKeepForDbpitr := TRUE;
        ELSIF (listBackupInMKS(lbState.lbDfRecTabUs, lbRec,
                               lbState.lbMaxDfNumber, TRUE)) THEN
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'inside MKS -> no obsolete');
          END IF;
          lbState.lbObsoleteRetention := FALSE; -- need this incremental
          lbState.lbKeepForDbpitr := FALSE;
        END IF;
      ELSIF (lbRec.file_type = archivedlog_txt) THEN   --- redo log
        -- Per discussion with jwlee, we decided to purge the archivelogs
        -- on disk that are outside the grsp range, but keep the
        -- archivelog backups since the oldest GRP.
        --
        IF ((lbRec.rl_next_change# >= lbState.lbMinGrsp AND
             lbRec.backup_type != copy_txt) OR                  -- (a)
            lbRec.rl_next_time >= lbState.lbFbUntilTime OR      -- (b)
            (lbRec.rl_next_change# >= lbState.lbRlKeepSCN AND   -- (c)
             (lbState.lbRlKeepRlgSCN IS NULL OR
              lbRec.rl_resetlogs_change# = lbState.lbRlKeepRlgSCN OR
              lbRec.rl_resetlogs_change# >= lbState.lbRlKeepSCN))) THEN
          IF (debug) THEN -- protect for performance
            IF (lbRec.rl_next_time >= lbState.lbFbUntilTime) THEN
              deb(DEB_IN, 'Redolog after lbFbUntilTime -> no obsolete');
            ELSIF (lbRec.rl_next_change# >= lbState.lbMinGrsp AND
                   lbRec.backup_type != copy_txt) THEN
              deb(DEB_IN, 'Redolog after lbMinGrsp -> no obsolete');
            ELSE
              deb(DEB_IN, 'Redolog after lbRlkeepSCN -> no obsolete');
            END IF;
          END IF;
          lbState.lbObsoleteRetention := FALSE; -- this redo must be kept
        END IF;
      END IF;
    END IF;
    IF (NOT lbState.lbKeepForDbpitr AND
        lbRec.file_type = archivedlog_txt) THEN
      IF (lbRec.rl_next_change# >= lbState.lbRlKeepSCN AND
          (lbRec.rl_first_change# <= untilSCN OR
           lbRec.rl_first_time <= untilTime) AND
          (lbState.lbRlKeepRlgSCN IS NULL OR
           lbRec.rl_resetlogs_change# = lbState.lbRlKeepRlgSCN OR
           lbRec.rl_resetlogs_change# >= lbState.lbRlKeepSCN)) THEN
        IF (debug) THEN
          deb(DEB_IN, 'keep_for_dbpitr = YES');
        END IF;
        lbState.lbKeepForDbpitr := TRUE;
      END IF;
    END IF;
    -- If the above part said that this is not an obsolete backup file,
    -- then we can write debugs saying that this object is not obsolete.
    IF (NOT lbState.lbObsoleteRetention AND NOT lbState.lbObsoleteKeep AND
        lbState.lbNeedObsoleteData) THEN
      IF (debug) THEN
        deb(DEB_IN, 'Not obsolete');
      END IF;
      lbRec.obsolete := 'NO';
      lbState.lbRecCmn.obsolete := 'NO';
      IF (lbState.lbKeepForDbpitr) THEN
        lbState.lbRecCmn.keep_for_dbpitr := 'YES';
        lbRec.keep_for_dbpitr := 'YES';
      ELSE
        lbState.lbRecCmn.keep_for_dbpitr := 'NO';
        lbRec.keep_for_dbpitr := 'NO';
      END IF;
      IF (only_obsolete) THEN
        GOTO listBackup_end;
      END IF;
    END IF;
    -- If this row is a datafile which is part of a backup set, then we
    -- should find the corresponding filename and tablespace name which is
    -- stored in the lbDfRecTab table. BTW, we also need the tablespace
    -- name for copies.
    IF (lbRec.backup_type IN (backupset_txt, copy_txt) AND
        lbRec.file_type = datafile_txt) THEN
      found := FALSE;
      -- To optimize this scan, start from the df_file# index, which is the
      -- starting index unless there is a hole in file#.
<<loop_lbDfRecTab>>
      FOR i in lbRec.df_file#-1..lbState.lbDfRecTab.count-1 LOOP
        IF (lbState.lbDfRecTab(i).dfNumber = lbRec.df_file# AND
            lbState.lbDfRecTab(i).dfCreationSCN =
              lbRec.df_creation_change#) THEN
          IF (lbRec.backup_type = backupset_txt) THEN
            lbRec.fname := lbState.lbDfRecTab(i).fileName;
          END IF;
          lbRec.df_tablespace := lbState.lbDfRecTab(i).tsName;
          found := TRUE;
          EXIT loop_lbDfRecTab;
        ELSIF (lbState.lbDfRecTab(i).dfNumber > lbRec.df_file#) THEN
          EXIT loop_lbDfRecTab;
        END IF;
      END LOOP;
      IF (NOT found) THEN
        -- There is a hole in file#, so do a reverse scan for a match.
<<reverse_loop_lbDfRecTab>>
        FOR i in REVERSE 0..least(lbRec.df_file#-1,
                                  lbState.lbDfRecTab.count-1) LOOP
          IF (lbState.lbDfRecTab(i).dfNumber = lbRec.df_file# AND
              lbState.lbDfRecTab(i).dfCreationSCN =
                lbRec.df_creation_change#) THEN
            IF (lbRec.backup_type = backupset_txt) THEN
              lbRec.fname := lbState.lbDfRecTab(i).fileName;
            END IF;
            lbRec.df_tablespace := lbState.lbDfRecTab(i).tsName;
            found := TRUE;
            EXIT reverse_loop_lbDfRecTab;
          ELSIF (lbState.lbDfRecTab(i).dfNumber < lbRec.df_file#) THEN
            EXIT reverse_loop_lbDfRecTab;
          END IF;
        END LOOP;
      END IF;
    END IF;
    -- If lbRec.backup_type is backup set, then lbRec.file_type can be:
    --  * backupset_txt - contains metadata about the backupset
    --  * piece_txt     - contains metadata about backup pieces
    --  * datafile_txt/archivedlog_txt/spfile_txt/controlfile_txt -
    --    contains metadata about files included in this backup set
    IF (lbRec.backup_type = backupset_txt) THEN
      IF (lbRec.file_type = backupset_txt) THEN
        -- If lbRec.file_type is backupset, then we will reset the
        -- collection table lbRecTmpTab and all counters. We will also
        -- save meta-data about this backup set into lbRecCmn.
        -- Reset lbRecTmpTab, piece counts, and copy count.
        lbState.lbRecTmpTab.delete;
        lbState.lbCopyCount := 0;
        lbState.lbPieceCountTab.delete;
        -- Fill up the data which is shared with all backup set rows.
        -- We just copy the entire lbRec.
        lbState.lbRecCmn := lbRec;
        lbState.lbRecCmn.bs_copies := 0;
        lbState.lbRecCmn.bs_bytes := 0;
      ELSIF lbRec.file_type in (datafile_txt, controlfile_txt, spfile_txt,
                                archivedlog_txt, piece_txt) THEN
        IF (NOT only_obsolete OR lbRec.file_type = piece_txt) THEN
          -- Save the row into the lbRecTmpTab collection table.
          lbState.lbRecTmpTab(lbState.lbRecTmpTab.count) := lbRec;
          IF (lbRec.file_type = piece_txt) THEN
            listBackupProcessPiece(lbRec, lbRecOut, lbState);
          END IF;
        END IF;
      ELSE
        -- This really cannot happen, so signal an internal error.
        deb(DEB_EXIT, 'with error 20999');
        raise_application_error(-20999, 'internal error: listBackup_2');
      END IF;
    ELSIF (lbRec.backup_type = copy_txt) THEN
      -- If lbRec.backup_type is copy then return the same data as
      -- returned from the lbCursor cursor.
      --
      -- Do the filtering here for copies
      --
      -- 1. Requested only recovery area files.
      -- 2. Specified device type.
      --
      IF (diskDevice) THEN
        IF (recoveryDestFile AND lbRec.is_rdf = 'NO') THEN
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'copy not recovery area file pkey: ' ||
                lbRec.pkey);
          END IF;
        ELSE
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'device allocated: pipelining copy '||lbRec.pkey);
          END IF;
          lbState.lbRecOutTab(lbState.lbRecOutTab_count) := lbRec;
          lbState.lbRecOutTab_count := lbState.lbRecOutTab_count+1;
        END IF;
      ELSE
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'device not allocated: skipping copy '||lbRec.pkey);
        END IF;
      END IF;
    ELSIF (lbRec.backup_type = proxycopy_txt) THEN
      -- If lbRec.backup_type is proxycopy then return the same data as
      -- returned from the lbCursor cursor.
      --
      -- Do the filtering here for proxy copies
      --
      -- 1. Requested only recovery area files.
      -- 2. Specified device type.
      --
      IF (anyDevice = TRUE# OR
          isDeviceTypeAllocated(lbRec.device_type) = TRUE#) THEN
        IF (recoveryDestFile AND lbRec.is_rdf = 'NO') THEN
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'proxycopy not a recovery area file pkey: ' ||
                lbRec.pkey);
          END IF;
        ELSE
          IF (debug) THEN -- protect for performance
            deb(DEB_IN, 'device allocated: pipelining proxycopy '||
                lbRec.pkey);
          END IF;
          lbState.lbRecOutTab(lbState.lbRecOutTab_count) := lbRec;
          lbState.lbRecOutTab_count := lbState.lbRecOutTab_count+1;
        END IF;
      ELSE
        IF (debug) THEN -- protect for performance
          deb(DEB_IN, 'device not allocated: skipping proxycopy '||
              lbRec.pkey);
        END IF;
      END IF;
    ELSE
      deb(DEB_EXIT, 'with error 20999');
      raise_application_error(-20999, 'internal error: listBackup_3');
    END IF;
  END LOOP;
  -- If lbRecOutTab is not empty then return the last row.
  IF (lbState.lbRecOutTab_count > 0) THEN
    lbState.lbRecOutTab_count := lbState.lbRecOutTab_count - 1;
    lbRecOut := lbState.lbRecOutTab(lbState.lbRecOutTab_count);
  END IF;
<<listBackup_end>>
  -- If lbRecOutTab is empty and we reached the end of the lbCursor cursor,
  -- then close the cursor and return FALSE.
  IF (lbState.lbRecOutTab_count = 0) THEN
    IF (piped_call) THEN
      IF (lbCursor%NOTFOUND) THEN
        CLOSE lbCursor;
        cacheBsRecTable.hint := noHint;
        IF (debug) THEN -- protect for performance
          deb(DEB_EXIT, 'FALSE');
        END IF;
        RETURN FALSE;
      END IF;
    ELSE
      IF (listBackup_c%NOTFOUND) THEN
        CLOSE listBackup_c;
        cacheBsRecTable.hint := noHint;
        IF (debug) THEN -- protect for performance
          deb(DEB_EXIT, 'FALSE');
        END IF;
        RETURN FALSE;
      END IF;
    END IF;
  END IF;
  deb(DEB_EXIT, 'TRUE');
  RETURN TRUE;
EXCEPTION
  WHEN OTHERS THEN
    IF (piped_call) THEN
      IF (lbCursor%ISOPEN) THEN
        CLOSE lbCursor;
      END IF;
    ELSE
      IF (listBackup_c%ISOPEN) THEN
        CLOSE listBackup_c;
      END IF;
    END IF;
    deb(DEB_EXIT, 'with exception: '||substr(sqlerrm, 1, 512));
    RAISE;
END listBackup;
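-- Hedged sketch of the calling pattern listBackup expects (caller-side
-- pseudo-code; the variable names are illustrative only):
--
--   more := listBackup(rec, firstCall => TRUE, only_obsolete => FALSE,
--                      redundancy => 1, piped_call => FALSE,
--                      lbCursor => cur, lbState => st);
--   WHILE more LOOP
--     <process rec: one backupset, piece, backup file, or copy row>
--     more := listBackup(rec, firstCall => FALSE, only_obsolete => FALSE,
--                        redundancy => 1, piped_call => FALSE,
--                        lbCursor => cur, lbState => st);
--   END LOOP;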
-- Set/Get Session key filters for job views
PROCEDURE sv_setSessionKey(skey IN NUMBER) IS
BEGIN
  session_key := skey;
  deb(DEB_PRINT, 'Session Key Filter='|| session_key);
END;

FUNCTION sv_getSessionKey RETURN NUMBER IS
BEGIN
  return session_key;
END;

-- Set/Get Session Time range filter functions for job views.
PROCEDURE sv_setSessionTimeRange(fromTime IN DATE, untilTime IN DATE) IS
BEGIN
  session_fromTime := fromTime;
  session_untilTime := untilTime;
  deb(DEB_PRINT, 'Session Time range Filter='||
      to_char(session_fromTime, 'MM/DD/YYYY HH24:MI:SS') || ' To ' ||
      to_char(session_untilTime, 'MM/DD/YYYY HH24:MI:SS'));
END;

FUNCTION sv_getSessionfromTimeRange RETURN DATE IS
BEGIN
  return session_fromtime;
END;

FUNCTION sv_getSessionUntilTimeRange RETURN DATE IS
BEGIN
  return session_untilTime;
END;

---------------------------- getRetentionPolicy -----------------------------

PROCEDURE getRetentionPolicy(recovery_window OUT number
                            ,redundancy      OUT number) IS
  conf_value varchar2(512);
  conf_name  varchar2(512) := 'RETENTION POLICY';
  conf#      binary_integer;
  l1         binary_integer;
  l2         binary_integer;
  l3         binary_integer;
BEGIN
  deb(DEB_ENTER, 'getRetentionPolicy');
  recovery_window := 0;
  redundancy := 1;
  IF (findConfig_c%ISOPEN) THEN
    CLOSE findConfig_c;
  END IF;
  OPEN findConfig_c(conf_name, conf_value, null);
  FETCH findConfig_c INTO conf#, conf_name, conf_value;
  IF (NOT findConfig_c%NOTFOUND) THEN
    IF (conf_value like '%RECOVERY WINDOW%') THEN
      l1 := length('TO RECOVERY WINDOW OF ');
      l2 := length(' DAYS');
      l3 := length(conf_value);
      recovery_window := to_number(substr(conf_value, l1, l3-l2-l1+1));
    END IF;
    IF (conf_value like '%REDUNDANCY%') THEN
      l1 := length('TO REDUNDANCY ');
      l2 := length(conf_value);
      redundancy := to_number(substr(conf_value, l1, l2-l1+1));
    END IF;
    IF (conf_value like '%NONE%') THEN
      -- NONE means that nothing is ever obsolete. So, set redundancy and
      -- recovery window to 0. It is up to the client to handle this.
      redundancy := 0;
      recovery_window := 0;
    END IF;
  END IF;
  CLOSE findConfig_c;
  deb(DEB_IN, 'recovery window = '||recovery_window);
  deb(DEB_IN, 'redundancy = '||redundancy);
  deb(DEB_EXIT, 'getRetentionPolicy');
END getRetentionPolicy;
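-- Worked example of the substr arithmetic in getRetentionPolicy above (the
-- configuration value is hypothetical): for
-- conf_value = 'TO RECOVERY WINDOW OF 7 DAYS',
--   l1 = length('TO RECOVERY WINDOW OF ') = 22
--   l2 = length(' DAYS')                  = 5
--   l3 = length(conf_value)               = 28
-- so substr(conf_value, 22, 28-5-22+1) = substr(conf_value, 22, 2) = ' 7',
-- and to_number(' 7') = 7. The extraction starts at the trailing blank of
-- the prefix, which to_number ignores.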
--------------------------- translateDataFileCopy -----------------------------

PROCEDURE translateDataFileCopy(
  duplicates IN number
 ,statusMask IN binary_integer
 ,onlyrdf    IN binary_integer
 ,pluginSCN  IN number DEFAULT 0)
IS
BEGIN
  validateState(getDatafileCopyCursor);
  -- Replaces cdf_tag. Note that we look only for ones in the current
  -- incarnation because that is what cdf_tag did. If access to
  -- datafilecopies in other incarnations is required, it must be done by
  -- key or filename.
  OPEN findDatafileBackup_c(
    sourcemask => imageCopy_con_t,
    reset_scn  => this_reset_scn,
    reset_time => this_reset_time,
    statusMask => statusMask,
    duplicates => duplicates,
    onlyrdf    => onlyrdf,
    pluginSCN  => pluginSCN);
  getDatafileCopyCursor := 'findDatafileBackup_c';
  getDataFileCopyNoRows.error := NULL;
  getDataFileCopyNoRows.msg := 'Datafile copy does not exist';
  getDataFileCopyDuplicates := duplicates;
  getDataFileCopyLast.dfNumber_obj := NULL;
  getDataFileCopySingleRow := FALSE;
END translateDataFileCopy;

--------getBackupHistory------------
-- Get Backup History Information --
------------------------------------

PROCEDURE getDfBackupHistory(
  backedUpDev  IN varchar2
 ,first        IN boolean
 ,bhistoryRec  OUT NOCOPY bhistoryRec_t
 ,recentbackup IN boolean DEFAULT FALSE
 ,doingCmd     IN varchar2 DEFAULT NULL
 ,keepTag      IN varchar2 DEFAULT NULL
 ,toDest1      IN varchar2 DEFAULT NULL
 ,toDest2      IN varchar2 DEFAULT NULL
 ,toDest3      IN varchar2 DEFAULT NULL
 ,toDest4      IN varchar2 DEFAULT NULL)
IS
  eof         boolean := FALSE;
  local       bhistoryRec_t;
  icount      number := 0;
  locCreSCN   number;
  lastCreSCN  number;
  locRlgSCN   number;
  lastRlgSCN  number;
  locRlgTime  date;
  lastRlgTime date;
  locCkpSCN   number;
  lastCkpSCN  number;
BEGIN
  deb(DEB_ENTER, 'getDfBackupHistory');
  IF (first) THEN
    getLastBackupHistory.dfNumber := NULL;
    IF (dfBackupHistory_c2%ISOPEN) THEN
      CLOSE dfBackupHistory_c2;
    END IF;
    deb(DEB_OPEN, 'dfBackupHistory_c2');
    OPEN dfBackupHistory_c2(device_type => backedUpDev,
                            cmd         => doingCmd,
                            ktag        => keepTag,
                            pattern1    => startWithPattern(toDest1),
                            pattern2    => startWithPattern(toDest2),
                            pattern3    => startWithPattern(toDest3),
                            pattern4    => startWithPattern(toDest4));
  END IF;
  IF (getLastBackupHistory.dfNumber IS NOT NULL AND
      (recentbackup OR
       getLastBackupHistory.ckp_scn = getLastBackupHistory.stop_scn)) THEN
    -- last file was offline/readonly now
    icount := 1;
  END IF;
  IF (getLastBackupHistory.dfNumber IS NOT NULL) THEN
    deb(DEB_IN, 'with file# = ' || to_char(getLastBackupHistory.dfNumber) ||
        ' icount= ' || to_char(icount) ||
        ' ckp_scn= ' || to_char(getLastBackupHistory.ckp_scn) ||
        ' compTime= ' || to_char(getLastBackupHistory.compTime,
                                 'DD-MON-RR HH24:MI:SS'));
  END IF;
  IF (NOT dfBackupHistory_c2%ISOPEN) THEN
    eof := TRUE;
    goto lastRow;
  END IF;
<<nextRow>>
  FETCH dfBackupHistory_c2 INTO local;
  IF (dfBackupHistory_c2%NOTFOUND) THEN
    CLOSE dfBackupHistory_c2;
    eof := TRUE;
  ELSE
    IF (local.pluginSCN != 0) THEN
      locCreSCN   := local.pluginSCN;
      locRlgSCN   := local.pluginRlgSCN;
      locRlgTime  := local.pluginRlgTime;
      lastCreSCN  := getLastBackupHistory.pluginSCN;
      lastRlgSCN  := getLastBackupHistory.pluginRlgSCN;
      lastRlgTime := getLastBackupHistory.pluginRlgTime;
    ELSE
      locCreSCN   := local.create_scn;
      locRlgSCN   := local.reset_scn;
      locRlgTime  := local.reset_time;
      lastCreSCN  := getLastBackupHistory.create_scn;
      lastRlgSCN  := getLastBackupHistory.reset_scn;
      lastRlgTime := getLastBackupHistory.reset_time;
    END IF;
    IF (local.pluggedRonly = 1) THEN
      locCkpSCN  := local.pluginSCN;
      lastCkpSCN := getLastBackupHistory.pluginSCN;
    ELSE
      locCkpSCN  := local.ckp_scn;
      lastCkpSCN := getLastBackupHistory.ckp_scn;
    END IF;
    IF (getLastBackupHistory.dfNumber IS NULL OR
        (getLastBackupHistory.dfNumber = local.dfNumber AND
         lastCreSCN = locCreSCN AND
         lastRlgSCN = locRlgSCN AND
         lastRlgTime = locRlgTime)) THEN
      IF (recentbackup) THEN
        IF (getLastBackupHistory.dfNumber IS NULL OR
            locCkpSCN = lastCkpSCN) THEN
          -- this is same as the recent backup we saw
          icount := icount + 1;
        END IF;
      ELSIF (local.ckp_scn = local.stop_scn OR local.pluggedRonly = 1) THEN
        -- For compatibility reasons:
        -- this file is offline/readonly now and a backup is on this device
        icount := icount + 1;    -- bump the number of copies
      END IF;
      IF (getLastBackupHistory.dfNumber IS NULL) THEN
        getLastBackupHistory := local;  -- remember the recent backup
      END IF;
      deb(DEB_IN, 'with file# = ' || to_char(local.dfNumber) ||
          ' icount= ' || to_char(icount) ||
          ' ckp_scn= ' || to_char(local.ckp_scn) ||
          ' compTime= ' || to_char(local.compTime, 'DD-MON-RR HH24:MI:SS'));
      goto nextRow;
    END IF;
  END IF;
  -- We are here because either a different (file#, create_scn, reset_scn,
  -- reset_time) was seen or we reached the eof.
<<lastRow>>
  IF (eof AND getLastBackupHistory.dfNumber IS NULL) THEN
    deb(DEB_EXIT, 'with: no_data_found');
    RAISE no_data_found;
  END IF;
  -- the last backup contains the maximum completion time because
  -- we ordered by completion_time
  bhistoryRec := getLastBackupHistory;
  bhistoryRec.nbackups := icount;
  IF (eof) THEN
    getLastBackupHistory.dfNumber := NULL; -- for next time to raise no_data
  ELSE
    -- remember the last copy because we haven't returned this yet
    getLastBackupHistory := local;
  END IF;
  deb(DEB_EXIT, 'with file# = ' || to_char(bhistoryRec.dfNumber) ||
      ' nbackups= ' || to_char(bhistoryRec.nbackups) ||
      ' ckp_scn= ' || to_char(bhistoryRec.ckp_scn) ||
      ' compTime= ' || to_char(bhistoryRec.compTime,
                               'DD-MON-RR HH24:MI:SS'));
END getDfBackupHistory;
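-- The backup-history procedures above and below share one look-ahead,
-- control-break pattern (sketch; the rows are hypothetical): the cursor
-- orders all copies of an object adjacently. Rows for, say, datafile 5 are
-- counted into icount while getLastBackupHistory remembers the group; when
-- the first row for datafile 6 arrives, the datafile-5 record is returned
-- with nbackups set, and the datafile-6 row is kept as the look-ahead for
-- the next call.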
PROCEDURE getDcBackupHistory(
  backedUpDev IN varchar2
 ,first       IN boolean
 ,bhistoryRec OUT NOCOPY bhistoryRec_t
 ,doingCmd    IN varchar2 DEFAULT NULL
 ,keepTag     IN varchar2 DEFAULT NULL
 ,toDest1     IN varchar2 DEFAULT NULL
 ,toDest2     IN varchar2 DEFAULT NULL
 ,toDest3     IN varchar2 DEFAULT NULL
 ,toDest4     IN varchar2 DEFAULT NULL)
IS
  eof         boolean := FALSE;
  local       bhistoryRec_t;
  icount      number := 0;
  locCreSCN   number;
  lastCreSCN  number;
  locRlgSCN   number;
  lastRlgSCN  number;
  locRlgTime  date;
  lastRlgTime date;
  locCkpSCN   number;
  lastCkpSCN  number;
BEGIN
  deb(DEB_ENTER, 'getDcBackupHistory');
  IF (first) THEN
    getLastBackupHistory.dfNumber := NULL;
    IF (dcBackupHistory_c%ISOPEN) THEN
      CLOSE dcBackupHistory_c;
    END IF;
    deb(DEB_OPEN, 'dcBackupHistory_c');
    OPEN dcBackupHistory_c(device_type => backedUpDev,
                           cmd         => doingCmd,
                           ktag        => keepTag,
                           pattern1    => startWithPattern(toDest1),
                           pattern2    => startWithPattern(toDest2),
                           pattern3    => startWithPattern(toDest3),
                           pattern4    => startWithPattern(toDest4));
  END IF;
  IF (getLastBackupHistory.dfNumber IS NOT NULL AND
      getLastBackupHistory.ckp_scn = getLastBackupHistory.stop_scn) THEN
    icount := 1;
  END IF;
  IF (NOT dcBackupHistory_c%ISOPEN) THEN
    eof := TRUE;
    goto lastRow;
  END IF;
  IF (getLastBackupHistory.dfNumber IS NOT NULL) THEN
    deb(DEB_IN, 'with file# = ' || to_char(getLastBackupHistory.dfNumber) ||
        ' create_scn= ' || to_char(getLastBackupHistory.create_scn) ||
        ' reset_scn= ' || to_char(getLastBackupHistory.reset_scn) ||
        ' reset_time= ' || to_char(getLastBackupHistory.reset_time,
                                   'DD-MON-RR HH24:MI:SS') ||
        ' ckp_scn= ' || to_char(getLastBackupHistory.ckp_scn) ||
        ' stop_scn= ' || to_char(getLastBackupHistory.stop_scn) ||
        ' nbackups= ' || to_char(getLastBackupHistory.nbackups) ||
        ' compTime= ' || to_char(getLastBackupHistory.compTime,
                                 'DD-MON-RR HH24:MI:SS'));
  END IF;
<<nextRow>>
  FETCH dcBackupHistory_c INTO local;
  IF (dcBackupHistory_c%NOTFOUND) THEN
    CLOSE dcBackupHistory_c;
    eof := TRUE;
  ELSE
    IF (local.pluginSCN != 0) THEN
      locCreSCN   := local.pluginSCN;
      locRlgSCN   := local.pluginRlgSCN;
      locRlgTime  := local.pluginRlgTime;
      lastCreSCN  := getLastBackupHistory.pluginSCN;
      lastRlgSCN  := getLastBackupHistory.pluginRlgSCN;
      lastRlgTime := getLastBackupHistory.pluginRlgTime;
    ELSE
      locCreSCN   := local.create_scn;
      locRlgSCN   := local.reset_scn;
      locRlgTime  := local.reset_time;
      lastCreSCN  := getLastBackupHistory.create_scn;
      lastRlgSCN  := getLastBackupHistory.reset_scn;
      lastRlgTime := getLastBackupHistory.reset_time;
    END IF;
    IF (getLastBackupHistory.dfNumber IS NULL OR
        (getLastBackupHistory.dfNumber = local.dfNumber AND
         lastCreSCN = locCreSCN AND
         getLastBackupHistory.ckp_scn = local.ckp_scn AND
         getLastBackupHistory.ckp_time = local.ckp_time AND
         getLastBackupHistory.pluggedRonly = local.pluggedRonly AND
         lastRlgSCN = locRlgSCN AND
         lastRlgTime = locRlgTime)) THEN
      IF (local.ckp_scn = local.stop_scn OR local.pluggedRonly = 1) THEN
        -- this file is offline/readonly now
        icount := icount + 1;        -- bump the number of copies
      END IF;
      getLastBackupHistory := local; -- remember the last copy
      deb(DEB_IN, 'with file# = ' || to_char(local.dfNumber) ||
          ' create_scn= ' || to_char(local.create_scn) ||
          ' reset_scn= ' || to_char(local.reset_scn) ||
          ' reset_time= ' || to_char(local.reset_time,
                                     'DD-MON-RR HH24:MI:SS') ||
          ' ckp_scn= ' || to_char(local.ckp_scn) ||
          ' stop_scn= ' || to_char(local.stop_scn) ||
          ' nbackups= ' || to_char(local.nbackups) ||
          ' compTime= ' || to_char(local.compTime, 'DD-MON-RR HH24:MI:SS'));
      goto nextRow;
    END IF;
  END IF;
  -- We are here because either a different (file#, create_scn, reset_scn,
  -- reset_time) was seen or we reached the eof.
<<lastRow>>
  IF (eof AND getLastBackupHistory.dfNumber IS NULL) THEN
    deb(DEB_EXIT, 'with: no_data_found');
    RAISE no_data_found;
  END IF;
  -- the last backup contains the maximum completion time because
  -- we ordered by completion_time
  bhistoryRec := getLastBackupHistory;
  bhistoryRec.nbackups := icount;
  IF (eof) THEN
    getLastBackupHistory.dfNumber := NULL; -- for next time to raise no_data
  ELSE
    -- remember the last copy because we haven't returned this yet
    getLastBackupHistory := local;
  END IF;
  deb(DEB_EXIT, 'with file# = ' || to_char(bhistoryRec.dfNumber) ||
      ' create_scn= ' || to_char(bhistoryRec.create_scn) ||
      ' reset_scn= ' || to_char(bhistoryRec.reset_scn) ||
      ' reset_time= ' || to_char(bhistoryRec.reset_time,
                                 'DD-MON-RR HH24:MI:SS') ||
      ' ckp_scn= ' || to_char(bhistoryRec.ckp_scn) ||
      ' stop_scn= ' || to_char(bhistoryRec.stop_scn) ||
      ' nbackups= ' || to_char(bhistoryRec.nbackups) ||
      ' compTime= ' || to_char(bhistoryRec.compTime,
                               'DD-MON-RR HH24:MI:SS'));
END getDcBackupHistory;

PROCEDURE getAlBackupHistory(
  backedUpDev IN varchar2
 ,first       IN boolean
 ,bhistoryRec OUT NOCOPY bhistoryRec_t
 ,doingCmd    IN varchar2 DEFAULT NULL
 ,keepTag     IN varchar2 DEFAULT NULL
 ,toDest1     IN varchar2 DEFAULT NULL
 ,toDest2     IN varchar2 DEFAULT NULL
 ,toDest3     IN varchar2 DEFAULT NULL
 ,toDest4     IN varchar2 DEFAULT NULL)
IS
  eof    boolean := FALSE;
  local  bhistoryRec_t;
  icount number := 0;
BEGIN
  deb(DEB_ENTER, 'getAlBackupHistory');
  IF (first) THEN
    getLastBackupHistory.logThread := NULL;
    IF (alBackupHistory_c2%ISOPEN) THEN
      CLOSE alBackupHistory_c2;
    END IF;
    deb(DEB_OPEN, 'alBackupHistory_c2');
    OPEN alBackupHistory_c2(device_type => backedUpDev,
                            cmd         => doingCmd,
                            ktag        => keepTag,
                            pattern1    => startWithPattern(toDest1),
                            pattern2    => startWithPattern(toDest2),
                            pattern3    => startWithPattern(toDest3),
                            pattern4    => startWithPattern(toDest4));
  END IF;
  IF (getLastBackupHistory.logThread IS NOT NULL) THEN
    icount := 1;
  END IF;
  IF (NOT alBackupHistory_c2%ISOPEN) THEN
    eof := TRUE;
    goto lastRow;
  END IF;
<<nextRow>>
  FETCH alBackupHistory_c2 INTO local;
  IF (alBackupHistory_c2%NOTFOUND) THEN
    CLOSE alBackupHistory_c2;
    eof := TRUE;
  ELSIF (getLastBackupHistory.logThread IS NULL OR
         (getLastBackupHistory.logThread = local.logThread AND
          getLastBackupHistory.logSequence = local.logSequence AND
           getLastBackupHistory.logTerminal = local.logTerminal AND
           getLastBackupHistory.next_scn = local.next_scn AND
           getLastBackupHistory.reset_scn = local.reset_scn AND
           getLastBackupHistory.reset_time = local.reset_time)) THEN
      icount := icount + 1;            -- bump the number of copies
      getLastBackupHistory := local;   -- remember the last copy
      deb(DEB_IN, 'with (reset_scn, reset_time, thread#, sequence#, terminal)=(' ||
          to_char(local.reset_scn) || ',' ||
          to_char(local.reset_time,'DD-MON-RR HH24:MI:SS') || ',' ||
          to_char(local.logThread) || ',' ||
          to_char(local.logSequence) || ',' ||
          to_char(local.logTerminal) || ')' ||
          ' nbackups= ' || local.nbackups ||
          ' compTime= ' || to_char(local.compTime, 'DD-MON-RR HH24:MI:SS'));
      goto nextRow;
   END IF;

   -- We are here either because a different (thread#, sequence#) is seen or
   -- we reached the eof.
   <<lastRow>>
   IF (eof AND getLastBackupHistory.logThread IS NULL) THEN
      deb(DEB_EXIT, 'with: no_data_found');
      RAISE no_data_found;
   END IF;

   -- the last backup contains the maximum completion time because
   -- we ordered by completion_time
   bhistoryRec := getLastBackupHistory;
   bhistoryRec.nbackups := icount;
   IF (eof) THEN
      getLastBackupHistory.logThread := NULL; -- for next time to raise no_data
   ELSE
      -- remember the last copy because we haven't returned this yet
      getLastBackupHistory := local;
   END IF;
   deb(DEB_EXIT, 'with (reset_scn, reset_time, thread#, sequence#, terminal)=(' ||
       to_char(bhistoryRec.reset_scn) || ',' ||
       to_char(bhistoryRec.reset_time,'DD-MON-RR HH24:MI:SS') || ',' ||
       to_char(bhistoryRec.logThread) || ',' ||
       to_char(bhistoryRec.logSequence) || ',' ||
       to_char(bhistoryRec.logTerminal) || ')' ||
       ' nbackups= ' || bhistoryRec.nbackups ||
       ' compTime= ' || to_char(bhistoryRec.compTime, 'DD-MON-RR HH24:MI:SS'));
END getAlBackupHistory;

PROCEDURE getBsBackupHistory(
      backedUpDev  IN varchar2
     ,first        IN boolean
     ,set_stamp    IN number DEFAULT NULL
     ,set_count    IN number DEFAULT NULL
     ,bhistoryRec  OUT NOCOPY bhistoryRec_t
     ,doingCmd     IN varchar2 DEFAULT NULL
     ,keepTag      IN varchar2 DEFAULT NULL
     ,toDest1      IN varchar2 DEFAULT NULL
     ,toDest2      IN varchar2 DEFAULT NULL
     ,toDest3      IN varchar2 DEFAULT NULL
     ,toDest4      IN varchar2 DEFAULT NULL) IS
   eof     boolean := FALSE;
   local   bhistoryRec_t;
   icount  number := 0;
   bpRec   bpRec_t;
BEGIN
   deb(DEB_ENTER, 'getBsBackupHistory');
   IF (set_stamp IS NOT NULL AND set_count IS NOT NULL) THEN
      IF (NOT first) THEN
         deb(DEB_EXIT, 'with: no_data_found');
         RAISE no_data_found;
      END IF;
      bpRec.setStamp := set_stamp;
      bpRec.setCount := set_count;
      getBackupHistory(bpRec            => bpRec,
                       backedUpDev      => backedUpDev,
                       nbackupsFlag     => 1,
                       bscompletionFlag => 1,
                       nbackups         => bhistoryRec.nbackups,
                       bscompletion     => bhistoryRec.compTime,
                       todest1          => toDest1,
                       todest2          => toDest2,
                       todest3          => toDest3,
                       todest4          => toDest4);
      IF (bhistoryRec.nbackups = 0) THEN
         deb(DEB_EXIT, 'with: no_data_found');
         RAISE no_data_found;
      END IF;
      bhistoryRec.setStamp := set_stamp;
      bhistoryRec.setCount := set_count;
      deb(DEB_EXIT, 'with set_stamp = ' || to_char(bhistoryRec.setStamp) ||
          ' set_count = ' || to_char(bhistoryRec.setCount) ||
          ' nbackups= ' || to_char(bhistoryRec.nbackups) ||
          ' compTime= ' || to_char(bhistoryRec.compTime, 'DD-MON-RR HH24:MI:SS'));
      RETURN;
   END IF;
   IF (first) THEN
      getLastBackupHistory.setStamp := NULL;
      IF (bsBackupHistory_c2%ISOPEN) THEN
         CLOSE bsBackupHistory_c2;
      END IF;
      deb(DEB_OPEN, 'bsBackupHistory_c2');
      OPEN bsBackupHistory_c2(device_type => backedUpDev,
                              cmd         => doingCmd,
                              ktag        => keepTag,
                              pattern1    => startWithPattern(toDest1),
                              pattern2    => startWithPattern(toDest2),
                              pattern3    => startWithPattern(toDest3),
                              pattern4    =>
                                  startWithPattern(toDest4));
   END IF;
   IF (getLastBackupHistory.setStamp IS NOT NULL) THEN
      icount := 1;
   END IF;
   IF (NOT bsBackupHistory_c2%ISOPEN) THEN
      eof := TRUE;
      goto lastRow;
   END IF;

   <<nextRow>>
   FETCH bsBackupHistory_c2 INTO local;
   IF (bsBackupHistory_c2%NOTFOUND) THEN
      CLOSE bsBackupHistory_c2;
      eof := TRUE;
   ELSIF (getLastBackupHistory.setStamp IS NULL OR
          (getLastBackupHistory.setStamp = local.setStamp AND
           getLastBackupHistory.setCount = local.setCount)) THEN
      icount := icount + 1;            -- bump the number of copies
      getLastBackupHistory := local;   -- remember the last copy
      goto nextRow;
   END IF;

   -- We are here either because a different key is seen or
   -- we reached the eof.
   <<lastRow>>
   IF (eof AND getLastBackupHistory.setStamp IS NULL) THEN
      deb(DEB_EXIT, 'with: no_data_found');
      RAISE no_data_found;
   END IF;

   -- the last backup contains the maximum completion time because
   -- we ordered by completion_time
   bhistoryRec := getLastBackupHistory;
   bhistoryRec.nbackups := icount;
   IF (eof) THEN
      getLastBackupHistory.setStamp := NULL; -- for next time to raise no_data
   ELSE
      -- remember the last copy because we haven't returned this yet
      getLastBackupHistory := local;
   END IF;
   deb(DEB_EXIT, 'with set_stamp = ' || to_char(bhistoryRec.setStamp) ||
       ' set_count = ' || to_char(bhistoryRec.setCount) ||
       ' nbackups= ' || to_char(bhistoryRec.nbackups) ||
       ' compTime= ' || to_char(bhistoryRec.compTime, 'DD-MON-RR HH24:MI:SS'));
END getBsBackupHistory;

-- Obsolete as of 9.2.0.1
-- DataFile History
PROCEDURE getBackupHistory(
      dfRec            IN dfRec_t
     ,backedUpDev      IN varchar2
     ,nbackupsFlag     IN number
     ,bscompletionFlag IN number
     ,nbackups         OUT number
     ,bscompletion     OUT date) IS
   local bhistoryRec_t;
BEGIN
   deb(DEB_ENTER, 'getBackupHistory');
   nbackups := 0;
   bscompletion := NULL;
   -- not interested in history
   IF ((nbackupsFlag != 1 OR backedUpDev IS NULL) AND bscompletionFlag != 1) THEN
      deb(DEB_EXIT, 'with not interested');
      RETURN;
   END IF;
   IF (dfBackupHistory_c1%ISOPEN) THEN
      CLOSE dfBackupHistory_c1;
   END IF;
   OPEN dfBackupHistory_c1(file#       => dfRec.dfNumber
                          ,crescn      => dfRec.dfCreationSCN
                          ,device_type => backedUpDev);
   <<nextRow>>
   FETCH dfBackupHistory_c1 INTO local;
   IF (dfBackupHistory_c1%NOTFOUND) THEN
      CLOSE dfBackupHistory_c1;
   ELSE
      IF (local.reset_scn = this_reset_scn AND
          local.reset_time = this_reset_time AND
          local.ckp_scn = dfRec.stopSCN) THEN
         nbackups := nbackups + 1;
      END IF;
      bscompletion := local.compTime;
      goto nextRow;
   END IF;
   deb(DEB_EXIT);
END getBackupHistory;

-- Obsolete from 9.2.0.1 version onwards
-- Archivelog History
PROCEDURE getBackupHistory(
      alRec            IN alRec_t
     ,backedUpDev      IN varchar2
     ,nbackupsFlag     IN number
     ,bscompletionFlag IN number
     ,nbackups         OUT number
     ,bscompletion     OUT date) IS
   local bhistoryRec_t;
BEGIN
   deb(DEB_ENTER, 'getBackupHistory');
   nbackups := 0;
   bscompletion := NULL;
   -- not interested in history
   IF ((nbackupsFlag != 1 OR backedUpDev IS NULL) AND bscompletionFlag != 1) THEN
      deb(DEB_EXIT, 'with not interested');
      RETURN;
   END IF;
   IF (alBackupHistory_c1%ISOPEN) THEN
      CLOSE alBackupHistory_c1;
   END IF;
   OPEN alBackupHistory_c1(thread#     => alRec.thread
                          ,sequence#   => alRec.sequence
                          ,device_type => backedUpDev);
   <<nextRow>>
   FETCH alBackupHistory_c1 INTO local;
   IF (alBackupHistory_c1%NOTFOUND) THEN
      CLOSE alBackupHistory_c1;
   ELSE
      nbackups := nbackups + 1;
      bscompletion := local.compTime;
      goto nextRow;
   END IF;
   deb(DEB_EXIT);
END getBackupHistory;

-- BackupPiece History
PROCEDURE getBackupHistory(
      bpRec            IN bpRec_t
     ,backedUpDev      IN varchar2
     ,nbackupsFlag     IN number
     ,bscompletionFlag IN number
     ,nbackups         OUT number
     ,bscompletion     OUT date
     ,todest1          IN varchar2 DEFAULT NULL
     ,todest2          IN
      varchar2 DEFAULT NULL
     ,todest3          IN varchar2 DEFAULT NULL
     ,todest4          IN varchar2 DEFAULT NULL) IS
   local bhistoryRec_t;
BEGIN
   deb(DEB_ENTER, 'getBackupHistory');
   nbackups := 0;
   bscompletion := NULL;
   -- not interested in history
   IF ((nbackupsFlag != 1 OR backedUpDev IS NULL) AND bscompletionFlag != 1) THEN
      deb(DEB_EXIT, 'with not interested');
      RETURN;
   END IF;
   IF (bsBackupHistory_c1%ISOPEN) THEN
      CLOSE bsBackupHistory_c1;
   END IF;
   OPEN bsBackupHistory_c1(set_stamp   => bpRec.setStamp
                          ,set_count   => bpRec.setCount
                          ,device_type => backedUpDev
                          ,pattern1    => startWithPattern(toDest1)
                          ,pattern2    => startWithPattern(toDest2)
                          ,pattern3    => startWithPattern(toDest3)
                          ,pattern4    => startWithPattern(toDest4));
   <<nextRow>>
   FETCH bsBackupHistory_c1 INTO local;
   IF (bsBackupHistory_c1%NOTFOUND) THEN
      CLOSE bsBackupHistory_c1;
   ELSE
      nbackups := nbackups + 1;
      bscompletion := local.compTime;
      goto nextRow;
   END IF;
   deb(DEB_EXIT);
END getBackupHistory;

------------------------
-- Controlfile Backup --
------------------------

----------------------------- findControlfileBackup ----------------------------
PROCEDURE findControlfileBackup(
      allCopies IN boolean default FALSE) IS
   cfrec       rcvRec_t;
   tag         varchar2(32);
   valRC       binary_integer;
   validateRec validBackupSetRec_t;
BEGIN
   deb(DEB_ENTER, 'findControlfileBackup');
   validateState(null);
   if onlyStandby is NULL then
      deb(DEB_IN, 'onlyStandby is set to NULL ');
   elsif onlyStandby = TRUE# then
      deb(DEB_IN, 'onlyStandby is set to TRUE ');
   else
      deb(DEB_IN, 'onlyStandby is set to FALSE ');
   end if;
   -- Open the cursor
   OPEN findControlfileBackup_c(sourcemask         => restoreSource,
                                currentIncarnation => TRUE#,
                                tag                => restoreTag,
                                untilSCN           => get_cfUntilScn(),
                                statusMask         => BSavailable,
                                needstby           => onlyStandby);
   -- Mark the cursor as opened so that getControlfileBackup doesn't open it
   -- again. Pre-10g clients don't call findControlfileBackup, so this
   -- exists for compatibility.
   findControlfileBackupCursor := TRUE;
   -- init stack, age, status variables
   resetthisBackupAge;
   getBS_status := NULL;
   resetrcvRecStack;
   IF (allCopies) THEN
      deb(DEB_IN, 'allCopies is TRUE');
   ELSE
      deb(DEB_IN, 'allCopies is FALSE');
   END IF;
   LOOP
      valRC := NULL;
      FETCH findControlfileBackup_c INTO cfrec;
      EXIT WHEN findControlfileBackup_c%NOTFOUND;
      IF (cfrec.type_con = imageCopy_con_t) THEN
         deb(DEB_IN, 'findControlfileBackup found a controlfilecopy:');
         IF (diskDevice) THEN
            valRC := SUCCESS;
         ELSE
            valRC := AVAILABLE;
         END IF;
      ELSIF (cfrec.type_con = backupSet_con_t) THEN
         deb(DEB_IN, 'findControlfileBackup found a controlfile backup:');
         valRC := validateBackupSet(backupSetRec           => cfrec,
                                    tag                    => restoreTag,
                                    tagMatchRequired       => TRUE,
                                    checkDeviceIsAllocated => TRUE,
                                    availableMask          => BSavailable,
                                    validRec               => validateRec);
      ELSIF (cfrec.type_con = proxyCopy_con_t) THEN
         deb(DEB_IN, 'findControlfileBackup found a controlfile proxy copy:');
         IF (restoreTag is not NULL AND cfrec.tag_con != restoreTag) THEN
            -- We cannot restore/list this backup set because we don't have the
            -- right tag. We want to remember this fact so that we can give
            -- the user a meaningful error message.
            deb(DEB_EXIT, 'tag does not match');
            valRC := UNAVAILABLE;
         ELSIF (anyDevice = TRUE# OR
                isDeviceTypeAllocated(cfrec.deviceType_con) = TRUE#) THEN
            valRC := SUCCESS;
         ELSE
            valRC := AVAILABLE;
         END IF;
      END IF;

      <<addAnother>>
      IF (getBS_status IS NULL AND valRC = AVAILABLE) THEN
         getBS_status := valRC;
      END IF;
      IF (debug) THEN
         printRcvRec(cfrec);
      END IF;
      IF (valRC = SUCCESS) THEN
         IF (thisBackupAge < rcvRecBackupAge) THEN
            deb(DEB_IN, 'skipping action because thisBackupAge (' ||
                thisBackupAge || ') < rcvRecBackupAge(' || rcvRecBackupAge || ')');
            thisBackupAge := thisBackupAge + 1;
         ELSE
            deb(DEB_IN, ' Added cfrec:');
            IF (cfrec.type_con = backupSet_con_t) THEN
               cfrec.tag_con        := validateRec.tag;
               cfrec.deviceType_con := validateRec.deviceType;
               cfrec.copyNumber_con := validateRec.copyNumber;
            END IF;
            rcvRecPush(cfrec);          -- add record for this action
            getBS_status := SUCCESS;
            IF (allCopies and cfrec.type_con = backupSet_con_t) THEN
               -- requested duplex copies
               -- Keep calling validateBackupSet0 until it fails to return
               -- SUCCESS.
               valRC := validateBackupSet0(tag                    => restoreTag,
                                           tagMatchRequired       => TRUE,
                                           checkDeviceIsAllocated => TRUE,
                                           validRec               => validateRec);
               IF (valRC = SUCCESS) THEN
                  GOTO addAnother;
               END IF;
            END IF;
            EXIT;                       -- we are done
         END IF;
      END IF;
   END LOOP;
   CLOSE findControlfileBackup_c;
   IF (getBS_status = SUCCESS) THEN
      deb(DEB_EXIT, 'with: SUCCESS');
   ELSIF (getBS_status = AVAILABLE) THEN
      deb(DEB_EXIT, 'with: AVAILABLE');
   ELSE
      getBS_status := UNAVAILABLE;
      deb(DEB_EXIT, 'with: UNAVAILABLE');
   END IF;
END findControlfileBackup;

----------------------------- getControlfileBackup ----------------------------
FUNCTION getControlfileBackup(
      rcvRec OUT NOCOPY rcvRec_t) RETURN number IS
   local rcvRec_t;
BEGIN
   deb(DEB_ENTER, 'getControlfileBackup');
   IF (NOT findControlfileBackupCursor) THEN
      -- Pre-10g clients don't call findControlfileBackup
      findControlfileBackup(FALSE);
      findControlfileBackupCursor := FALSE;
   END IF;
   IF (getRecStackCount = 0) THEN
      IF (getBS_status = SUCCESS) THEN
         deb(DEB_EXIT, 'with no more records');
         raise no_data_found;
      ELSIF (getBS_status = AVAILABLE) THEN
         deb(DEB_EXIT, 'with: AVAILABLE');
         RETURN AVAILABLE;
      ELSE
         deb(DEB_EXIT, 'with: UNAVAILABLE');
         RETURN UNAVAILABLE;
      END IF;
   END IF;
   rcvRecPop(local);
   IF (debug) THEN
      printRcvRec(local);
   END IF;
   rcvRec := local;
   deb(DEB_EXIT, 'with: SUCCESS');
   RETURN SUCCESS;
END getControlfileBackup;

-------------------------
-- Archived Log Backup --
-------------------------

------------------------- findRangeArchivedLogBackup --------------------------
-- We have to give preference to disk archivelog backups over tape
-- archivelog backups. So, we stack all archivelog backups that exist on
-- a non-disk device in a temporary stack called tapebkp. Disk backups are
-- considered first. If there are no disk backups, or the number of disk
-- backups doesn't satisfy the backupAge, then we look for tape backups.
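--
-- As a minimal illustrative sketch (not part of the package; the names
-- cand and pushed are hypothetical), the per-log selection boils down to:
--
--    FOR i IN 1 .. cand.count LOOP              -- candidates for one log
--       IF (cand(i).deviceType_con = 'DISK') THEN
--          rcvRecPush(cand(i));                 -- disk backup: use at once
--          pushed := TRUE;
--       ELSE
--          tapebkp(tapebkp.count + 1) := cand(i);  -- tape backup: defer
--       END IF;
--    END LOOP;
--    IF (NOT pushed) THEN                       -- no usable disk backup:
--       pushed := pushTapeBackup(tapebkp);      -- fall back to tape stack
--    END IF;
--
-- The real code below additionally honors backupAge and duplexed copies.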
--
PROCEDURE findRangeArchivedLogBackup(
      minthread    IN number
     ,minsequence  IN number
     ,minlowSCN    IN number
     ,maxthread    IN number
     ,maxsequence  IN number
     ,maxlowSCN    IN number
     ,allCopies    IN boolean default FALSE) IS
   lastrec     rcvRec_t;
   brlrec      rcvRec_t;
   validateRec validBackupSetRec_t;
   tapebkp     rcvRecTabI_t;  -- stack of tape backups
   valRC       number;
   BSstatus    number;        -- status of current backup
   skipped     boolean;

   FUNCTION dupbs(bs1 in rcvRec_t, bs2 in rcvRec_t) RETURN BOOLEAN IS
   BEGIN
      IF (bs1.type_con != backupSet_con_t OR
          bs2.type_con != backupSet_con_t) THEN
         RETURN FALSE;
      END IF;
      IF (bs1.setStamp_con != bs2.setStamp_con OR
          bs1.setCount_con != bs2.setCount_con) THEN
         RETURN FALSE;
      END IF;
      RETURN TRUE;
   END dupbs;

   FUNCTION pushTapeBackup(tapebkp in rcvRecTabI_t) RETURN BOOLEAN IS
      found boolean := FALSE;
   BEGIN
      deb(DEB_IN, 'looking for tape backups');
      FOR i IN 1..tapebkp.count LOOP
         -- stack all duplicate tape backups that are found
         EXIT when (found AND NOT dupbs(tapebkp(i), tapebkp(i-1)));
         IF (thisBackupAge < rcvRecBackupAge) THEN
            deb(DEB_IN, 'skipping action because thisBackupAge (' ||
                thisBackupAge || ') < rcvRecBackupAge(' || rcvRecBackupAge || ')');
            -- duplicate tape backups are considered to be of same age
            IF (i = 1 OR NOT dupbs(tapebkp(i), tapebkp(i-1))) THEN
               thisBackupAge := thisBackupAge + 1;
            END IF;
         ELSE
            deb(DEB_IN, 'Added tape backup ' || i);
            rcvRecPush(tapebkp(i));
            found := TRUE;
         END IF;
      END LOOP;
      RETURN found;
   END pushTapeBackup;

BEGIN
   deb(DEB_ENTER, 'findRangeArchivedLogBackup');
   validateState(null);
   lastrec.logThread_obj := 0;
   -- Use findRangeArcLogBackup cursor instead of findArcLogBackup because
   -- findRangeArcLogBackup cursor uses brl_i_dts index that prevents full
   -- table scan over brl.
   deb(DEB_OPEN, 'findRangeArcLogBackup');
   OPEN findRangeArcLogBackup(sourcemask  => to_number(null),
                              minthread   => minthread,
                              minsequence => minsequence,
                              minlowSCN   => minlowSCN,
                              maxthread   => maxthread,
                              maxsequence => maxsequence,
                              maxlowSCN   => maxlowSCN);
   resetrcvRecStack;
   tapebkp.delete;
   BSstatus := NULL;
   IF (allCopies) THEN
      deb(DEB_IN, 'allCopies is TRUE');
   ELSE
      deb(DEB_IN, 'allCopies is FALSE');
   END IF;
   LOOP
      <<nextbrlRec>>
      valRC := NULL;
      FETCH findRangeArcLogBackup INTO brlrec;
      IF (findRangeArcLogBackup%NOTFOUND) THEN
         -- If the last record was not successfully added, then look for
         -- tape backups that were stored in the temporary stack and move
         -- them to rcvRecStack.
         IF (BSstatus IS NULL OR BSstatus != SUCCESS) THEN
            IF (pushTapeBackup(tapebkp)) THEN
               BSstatus := SUCCESS;
            END IF;
         END IF;
         -- If there are no more records, check if the last status was
         -- available. If so, then add it to stack.
         IF (BSstatus = AVAILABLE AND lastrec.logThread_obj != 0) THEN
            deb(DEB_IN, ' Added lastlrec:');
            -- We have to indicate this record as available on another
            -- device in the stack. So, we overload the status_con with
            -- '*' to represent that.
            lastrec.status_con := '*';
            rcvRecPush(lastrec);
         END IF;
         EXIT;
      END IF;
      -- Before moving to next archivelog record, check if last status was
      -- available. If so, then add it to stack.
      IF (brlrec.logSequence_obj != lastrec.logSequence_obj OR
          brlrec.logThread_obj != lastrec.logThread_obj OR
          brlrec.logRlgSCN_obj != lastrec.logRlgSCN_obj OR
          brlrec.logRlgTime_obj != lastrec.logRlgTime_obj OR
          brlrec.logLowSCN_obj != lastrec.logLowSCN_obj OR
          brlrec.logNextSCN_obj != lastrec.logNextSCN_obj) THEN
         -- If the last record was not successfully added, then look for
         -- tape backups that were pushed to the temporary stack and move
         -- them to rcvRecStack.
         IF (BSstatus IS NULL OR BSstatus != SUCCESS) THEN
            IF (pushTapeBackup(tapebkp)) THEN
               BSstatus := SUCCESS;
            END IF;
         END IF;
         resetthisBackupAge;   -- init stack, age, status variables
         tapebkp.delete;
         IF (BSstatus = AVAILABLE) THEN
            -- We have to indicate this record as available on another
            -- device in the stack. So, we overload the status_con with
            -- '*' to represent that.
            deb(DEB_IN, ' Added lastlrec:');
            lastrec.status_con := '*';
            rcvRecPush(lastrec);
         END IF;
         BSstatus := NULL;     -- reset backup status for next record
         -- Is this new record part of the translation?
         IF NOT isTranslatedArchivedLog(thread#   => brlrec.logThread_obj
                                       ,sequence# => brlrec.logSequence_obj) THEN
            deb(DEB_IN, 'skip not translated brlrec' ||
                ' thread=' || brlrec.logThread_obj ||
                ' sequence=' || brlrec.logSequence_obj);
            goto nextbrlRec;
         END IF;
         -- Yes, it is part of the translation. Save this record to add it
         -- later if it is not available on this device.
         lastrec := brlrec;
         deb(DEB_IN, 'looking backups for' ||
             ' thread=' || brlrec.logThread_obj ||
             ' sequence=' || brlrec.logSequence_obj);
      END IF;
      IF (BSstatus = SUCCESS) THEN
         deb(DEB_IN, 'skip already stacked brlrec' ||
             ' thread=' || brlrec.logThread_obj ||
             ' sequence=' || brlrec.logSequence_obj);
         goto nextbrlRec;
      END IF;
      IF (brlrec.type_con = backupSet_con_t) THEN
         deb(DEB_IN, 'found a backupset:');
         valRC := validateBackupSet(backupSetRec           => brlrec,
                                    checkDeviceIsAllocated => TRUE,
                                    tag                    => restoreTag,
                                    tagMatchRequired       => TRUE,
                                    availableMask          => BSavailable,
                                    validRec               => validateRec);
      ELSIF (brlrec.type_con = proxyCopy_con_t) THEN
         deb(DEB_IN, 'found a proxy copy:');
         IF (restoreTag is not NULL AND brlrec.tag_con != restoreTag) THEN
            -- We cannot restore/list this backup set because we don't have the
            -- right tag. We want to remember this fact so that we can give
            -- the user a meaningful error message.
            deb(DEB_EXIT, 'tag does not match');
            valRC := UNAVAILABLE;
         ELSIF (anyDevice = TRUE# OR
                isDeviceTypeAllocated(brlrec.deviceType_con) = TRUE#) THEN
            valRC := SUCCESS;
         ELSE
            valRC := AVAILABLE;
         END IF;
      END IF;
      skipped := FALSE;

      <<addAnother>>
      IF (BSstatus IS NULL AND valRC = AVAILABLE) THEN
         BSstatus := valRC;
      END IF;
      IF (debug) THEN
         printRcvRec(brlrec);
      END IF;
      IF (valRC = SUCCESS) THEN
         IF (brlrec.type_con = backupSet_con_t) THEN
            brlrec.tag_con        := validateRec.tag;
            brlrec.deviceType_con := validateRec.deviceType;
            brlrec.copyNumber_con := validateRec.copyNumber;
         END IF;
         -- Give preference to disk backups. Tape backups are not
         -- considered first. Rather they are moved to a temporary stack
         -- which is then scanned if there are no disk backups that
         -- satisfy backupAge.
         IF (NOT skipped AND brlrec.deviceType_con = 'DISK' AND
             thisBackupAge < rcvRecBackupAge) THEN
            deb(DEB_IN, 'skipping action because thisBackupAge (' ||
                thisBackupAge || ') < rcvRecBackupAge(' || rcvRecBackupAge || ')');
            -- This brlrec is skipped. So, any duplex disk backupsets
            -- should not be accounted for age and not added to the stack.
            skipped := TRUE;
            thisBackupAge := thisBackupAge + 1;
         ELSE
            IF (brlrec.deviceType_con = 'DISK') THEN
               IF (NOT skipped) THEN
                  deb(DEB_IN, ' Added brlrec:');
                  rcvRecPush(brlrec);   -- add record for this action
                  BSstatus := SUCCESS;
               END IF;
            ELSE
               -- add this record to temporary stack
               deb(DEB_IN, ' Added brlrec to tapebkp stack:' ||
                   to_char(tapebkp.count+1));
               tapebkp(tapebkp.count+1) := brlrec;
            END IF;
         END IF;
         IF (allCopies AND brlrec.type_con = backupSet_con_t) THEN
            -- requested duplex copies
            -- Keep calling validateBackupSet0 until it fails to return
            -- SUCCESS.
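            -- (Each SUCCESS from validateBackupSet0 stands for one more
            -- usable copy of the same backup set, so the GOTO addAnother
            -- below loops until every duplexed copy has been stacked; a
            -- loop-shaped sketch of this pattern is given in
            -- findSpfileBackup further down.)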
valRC := validateBackupSet0( tag => restoreTag, tagMatchRequired => TRUE, checkDeviceIsAllocated => TRUE, validRec => validateRec); IF (valRC = SUCCESS) THEN GOTO addAnother; END IF; END IF; END IF; END LOOP; CLOSE findRangeArcLogBackup; deb(DEB_EXIT); END findRangeArchivedLogBackup; -- Obsolete as of 11g ---------------------------- findArchivedLogBackup ---------------------------- PROCEDURE findArchivedLogBackup( thread IN number ,sequence IN number ,lowSCN IN number ,allCopies IN boolean default FALSE) IS local rcvRec_t; BEGIN deb(DEB_ENTER, 'findArchivedLogBackup'); -- set this archivedLog Record as part of translation setArchivedLogRecord(thread# => thread ,sequence# => sequence ,first => TRUE); -- Use findRangeArcLogBackup cursor instead of findArcLogBackup because -- findRangeArcLogBackup cursor uses brl_i_dts index that prevents full -- table scan over brl. findRangeArchivedLogBackup(minthread => thread, minsequence => sequence, minlowSCN => lowSCN, maxthread => thread, maxsequence => sequence, maxlowSCN => lowSCN, allCopies => allCopies); IF (getRecStackCount = 0) THEN getBS_status := NULL; deb(DEB_EXIT, 'with UNAVAILABLE'); ELSE rcvRecTop(local); -- If the stacked record is of status '*', then it is available -- on another device. IF (local.status_con = '*') THEN getBS_status := AVAILABLE; resetrcvRecStack; deb(DEB_EXIT, 'with AVAILABLE'); ELSE getBS_status := SUCCESS; deb(DEB_EXIT, 'with SUCCESS'); END IF; END IF; END findArchivedLogBackup; -- Obsolete as of 11g ----------------------------- getArchivedLogBackup ---------------------------- FUNCTION getArchivedLogBackup( rcvRec OUT NOCOPY rcvRec_t) RETURN binary_integer IS local rcvRec_t; BEGIN deb(DEB_ENTER, 'getArchivedLogBackup'); IF (getRecStackCount = 0) THEN IF (getBS_status = SUCCESS) THEN deb(DEB_EXIT, 'with no more records'); raise no_data_found; ELSIF (getBS_status = AVAILABLE) THEN deb(DEB_EXIT, 'with: AVAILABLE'); RETURN AVAILABLE; ELSE deb(DEB_EXIT, 'with: UNAVAILABLE'); RETURN UNAVAILABLE; END IF; END IF; rcvRecPop(local); IF (debug) THEN printRcvRec(local); END IF; rcvRec := local; deb(DEB_EXIT, 'with: SUCCESS'); RETURN SUCCESS; END getArchivedLogBackup; ------------------- -- SPFILE Backup -- ------------------- -------------------------------findSpfileBackup-------------------------------- PROCEDURE findSpfileBackup( allCopies IN boolean default FALSE -- duplex copies ,redundancy IN number default NULL -- number of redundant copies ,rmanCmd IN number default unknownCmd_t) -- called for what rman command? IS scn_warn number; BEGIN findSpfileBackup(allCopies, redundancy, rmanCmd, scn_warn); END findSpfileBackup; PROCEDURE findSpfileBackup( allCopies IN boolean default FALSE -- duplex copies ,redundancy IN number default NULL -- number of redundant copies ,rmanCmd IN number default unknownCmd_t -- called for what rman command? 
     ,scn_warn   OUT number) IS
   bsfrec      rcvRec_t;
   tag         varchar2(32);
   valRC       binary_integer;
   validateRec validBackupSetRec_t;
   lcopies     number;
   findtime    date;
   estimated   boolean := TRUE;
BEGIN
   deb(DEB_ENTER, 'findSpfileBackup');
   validateState(null);
   -- Compute the time to use
   scn_warn := 0;
   IF (untilScn IS NOT NULL AND untilTime IS NULL) THEN
      computeSpfileTime(untilScn, findtime, allIncarnations, estimated);
      IF estimated THEN
         scn_warn := 1;
      END IF;
   ELSE
      findtime := untilTime;
   END IF;
   -- Open the cursor
   deb(DEB_OPEN, 'findSpfileBackup_c,rmanCmd=' || rmanCmd);
   OPEN findSpfileBackup_c(untilTime => findTime, rmanCmd => rmanCmd);
   -- Mark the cursor as opened so that getSpfileBackup doesn't open it
   -- again. Pre-10g clients don't call findSpfileBackup, so this exists
   -- for compatibility.
   findSpfileBackupCursor := TRUE;
   -- init stack, age, status variables
   resetthisBackupAge;
   getBS_status := NULL;
   resetrcvRecStack;
   IF (allCopies) THEN
      deb(DEB_IN, 'allCopies is TRUE');
   ELSE
      deb(DEB_IN, 'allCopies is FALSE');
   END IF;
   lcopies := redundancy;
   -- can't use allCopies for obsolete command
   IF (rmanCmd = obsoleteCmd_t AND allCopies) THEN
      raise_application_error(-20999, 'internal error: findSpfileBackup_1');
   END IF;
   LOOP
      valRC := NULL;
      FETCH findSpfileBackup_c INTO bsfrec;
      EXIT WHEN findSpfileBackup_c%NOTFOUND;
      IF (rmanCmd = obsoleteCmd_t) THEN
         -- The cursor already validated the backupset. Obsolete command is
         -- not interested in tag. So, we found a backupset.
         valRC := SUCCESS;
      ELSE
         valRC := validateBackupSet(backupSetRec           => bsfrec,
                                    tag                    => restoreTag,
                                    tagMatchRequired       => TRUE,
                                    checkDeviceIsAllocated => TRUE,
                                    availableMask          => BSavailable,
                                    validRec               => validateRec);
      END IF;

      <<addAnother>>
      IF (getBS_status IS NULL AND valRC = AVAILABLE) THEN
         getBS_status := valRC;
      END IF;
      IF (debug) THEN
         printRcvRec(bsfRec);
      END IF;
      IF (valRC = SUCCESS) THEN
         IF (thisBackupAge < rcvRecBackupAge) THEN
            deb(DEB_IN, 'skipping action because thisBackupAge (' ||
                thisBackupAge || ') < rcvRecBackupAge(' || rcvRecBackupAge || ')');
            thisBackupAge := thisBackupAge + 1;
         ELSIF (rmanCmd = obsoleteCmd_t) THEN
            deb(DEB_IN, ' Added bsfRec:');
            rcvRecPush(bsfRec);         -- add record for this action
            getBS_status := SUCCESS;
            IF (lcopies > 1) THEN
               lcopies := lcopies - 1;
            ELSE
               EXIT;                    -- we are done
            END IF;
         ELSE
            deb(DEB_IN, ' Added bsfRec:');
            bsfrec.tag_con        := validateRec.tag;
            bsfrec.deviceType_con := validateRec.deviceType;
            bsfrec.copyNumber_con := validateRec.copyNumber;
            rcvRecPush(bsfRec);         -- add record for this action
            getBS_status := SUCCESS;
            IF (allCopies) THEN
               -- requested duplex copies
               -- Keep calling validateBackupSet0 until it fails to return
               -- SUCCESS.
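               -- Written as a plain loop, this GOTO pattern amounts to the
               -- following simplified sketch (illustrative only; the real
               -- code also refreshes tag/device/copy# from validateRec
               -- before each push):
               --
               --    LOOP
               --       rcvRecPush(bsfRec);          -- stack validated copy
               --       EXIT WHEN NOT allCopies;
               --       valRC := validateBackupSet0(
               --                   tag                    => restoreTag,
               --                   tagMatchRequired       => TRUE,
               --                   checkDeviceIsAllocated => TRUE,
               --                   validRec               => validateRec);
               --       EXIT WHEN valRC != SUCCESS;  -- no further copy
               --    END LOOP;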
valRC := validateBackupSet0(tag => restoreTag, tagMatchRequired => TRUE, checkDeviceIsAllocated => TRUE, validRec => validateRec); IF (valRC = SUCCESS) THEN GOTO addAnother; END IF; END IF; IF (lcopies > 1) THEN lcopies := lcopies - 1; ELSE EXIT; -- we are done END IF; END IF; END IF; END LOOP; CLOSE findSpfileBackup_c; IF (getBS_status = AVAILABLE) THEN deb(DEB_EXIT, 'with: AVAILABLE'); ELSIF (getBS_status = SUCCESS) THEN deb(DEB_EXIT, 'with: SUCCESS'); ELSE getBS_status := UNAVAILABLE; deb(DEB_EXIT, 'with: UNAVAILABLE'); END IF; END findSpfileBackup; ----------------------------- getSpFileBackup ---------------------------- FUNCTION getSpfileBackup( rcvRec OUT NOCOPY rcvRec_t ,redundancy IN number default NULL ,rmanCmd IN number default unknownCmd_t) RETURN number IS local rcvRec_t; dummy number; BEGIN deb(DEB_ENTER, 'getSpfileBackup'); IF (NOT findSpfileBackupCursor) THEN -- Prior 10i version doesn't call findSpfileBackupCursor findSpfileBackup(allcopies => FALSE, redundancy => redundancy, rmanCmd => rmanCmd, scn_warn => dummy); END IF; IF (getRecStackCount = 0) THEN IF (getBS_status = SUCCESS) THEN deb(DEB_EXIT, 'with no more records'); raise no_data_found; ELSIF (getBS_status = AVAILABLE) THEN deb(DEB_EXIT, 'with: AVAILABLE'); RETURN AVAILABLE; ELSE deb(DEB_EXIT, 'with: UNAVAILABLE'); RETURN UNAVAILABLE; END IF; END IF; rcvRecPop(local); IF (debug) THEN printRcvRec(local); END IF; rcvRec := local; deb(DEB_EXIT, 'with: SUCCESS'); RETURN SUCCESS; END getSpfileBackup; ----------------------------- getCopyofDatafile ---------------------------- PROCEDURE getCopyofDatafile( first IN boolean ,itag IN varchar2 ,fno OUT number ,crescn OUT number ,rlogscn OUT number ,rlgtime OUT date ,recid OUT binary_integer ,stamp OUT binary_integer ,name OUT varchar2 ,otag OUT varchar2 ,status OUT varchar2 ,nblocks OUT binary_integer ,bsz OUT binary_integer ,ctime OUT date ,toscn OUT number ,totime OUT date ,pluggedRonly OUT binary_integer ,pluginSCN OUT number ,pluginRlgSCN OUT number ,pluginRlgTime OUT date) IS BEGIN deb(DEB_ENTER, 'getCopyofDatafile'); IF first THEN IF (getCopyofDatafile_c2%ISOPEN) THEN CLOSE getCopyofDatafile_c2; END IF; deb(DEB_PRINT, 'opening cursor'); OPEN getCopyofDatafile_c2(itag); END IF; FETCH getCopyofDatafile_c2 INTO fno, crescn, rlogscn, rlgtime, recid, stamp, name, otag, status, nblocks, bsz, ctime, toscn, totime, pluggedRonly, pluginSCN, pluginRlgSCN, pluginRlgTime; IF getCopyofDatafile_c2%NOTFOUND THEN deb(DEB_PRINT, 'closing cursor'); CLOSE getCopyofDatafile_c2; deb(DEB_EXIT, 'with: no data found'); RAISE no_data_found; END IF; END getCopyOfDatafile; -- Obsolete as of 11.2.0.3 ----------------------------- getCopyofDatafile ---------------------------- PROCEDURE getCopyofDatafile( dfnumber IN number ,itag IN varchar2 ,crescn IN OUT number ,rlogscn IN OUT number ,rlgtime IN OUT date ,recid OUT binary_integer ,stamp OUT binary_integer ,name OUT varchar2 ,otag OUT varchar2 ,status OUT varchar2 ,nblocks OUT binary_integer ,bsz OUT binary_integer ,ctime OUT date ,toscn OUT number ,totime OUT date ,pluggedRonly OUT binary_integer ,pluginSCN IN number) IS BEGIN OPEN getCopyofDatafile_c(dfnumber, itag, crescn, rlogscn, rlgtime, pluginSCN); FETCH getCopyofDatafile_c INTO recid, stamp, name, otag, status, nblocks, bsz, ctime, toscn, totime, crescn, rlogscn, rlgtime, pluggedRonly; IF getCopyofDatafile_c%NOTFOUND THEN CLOSE getCopyofDatafile_c; RAISE no_data_found; END IF; CLOSE getCopyofDatafile_c; END getCopyOfDatafile; -- Obsolete as of 11g PROCEDURE getCopyofDatafile( dfnumber IN 
number ,itag IN varchar2 ,crescn IN number ,rlogscn IN number ,rlgtime IN date ,recid OUT binary_integer ,stamp OUT binary_integer ,name OUT varchar2 ,otag OUT varchar2 ,status OUT varchar2 ,nblocks OUT binary_integer ,bsz OUT binary_integer ,ctime OUT date ,toscn OUT number ,totime OUT date) IS loc_crescn number := crescn; loc_rlogscn number := rlogscn; loc_rlgtime date := rlgtime; pluggedRonly binary_integer; pluginSCN number; BEGIN getCopyOfDatafile(dfnumber => dfnumber ,itag => itag ,crescn => loc_crescn ,rlogscn => loc_rlogscn ,rlgtime => loc_rlgtime ,recid => recid ,stamp => stamp ,name => name ,otag => otag ,status => status ,nblocks => nblocks ,bsz => bsz ,ctime => ctime ,toscn => toscn ,totime => totime ,pluggedRonly => pluggedRonly ,pluginSCN => 0); END getCopyOfDatafile; ------------------- -- Query Filters -- ------------------- ------------------------------ setCompletedRange ------------------------------ PROCEDURE setCompletedRange( after IN date ,before IN date) IS BEGIN getRA_completedAfter := after; getRA_completedBefore := before; END setCompletedRange; -------------------------------- setLikePattern ------------------------------- PROCEDURE setLikePattern( pattern IN varchar2) IS BEGIN getRA_likePattern := pattern; END setLikePattern; -------------------------------- setCanApplyAnyRedo --------------------------- PROCEDURE setcanApplyAnyRedo( flag IN boolean) IS BEGIN IF (flag) THEN deb(DEB_PRINT, 'canApplyAnyRedo is set to TRUE'); canApplyAnyRedo := TRUE#; ELSE deb(DEB_PRINT, 'canApplyAnyRedo is set to FALSE'); canApplyAnyRedo := FALSE#; END IF; END setcanApplyAnyRedo; -------------------------------- setCanConvertCf --------------------------- PROCEDURE setCanConvertCf( flag IN boolean) IS BEGIN IF (flag) THEN deb(DEB_PRINT, 'canConvert_Cf is set to TRUE'); canConvert_Cf := TRUE#; ELSE deb(DEB_PRINT, 'canConvert_Cf is set to FALSE'); canConvert_Cf := FALSE#; END IF; END setCanConvertCf; ------------------------------ setAllIncarnations ----------------------------- PROCEDURE setAllIncarnations( flag IN boolean) IS BEGIN IF (flag) THEN deb(DEB_PRINT, 'allIncarnations is set to TRUE'); allIncarnations := TRUE#; ELSE deb(DEB_PRINT, 'allIncarnations is set to FALSE'); allIncarnations := FALSE#; END IF; END setAllIncarnations; ----------------------------- isTranslatedFno -------------------------------- FUNCTION isTranslatedFno(fno IN NUMBER) RETURN NUMBER IS BEGIN IF (tc_database = TRUE# OR fno = 0 OR tc_fno.exists(fno)) THEN RETURN TRUE#; ELSE RETURN FALSE#; END IF; END isTranslatedFno; ---------------------------- setUntilResetlogs ------------------------------- PROCEDURE setUntilResetlogs IS BEGIN untilSCN := this_reset_scn; untilTime := NULL; rpoint_set := FALSE; IF (this_reset_scn is NULL) THEN raise_application_error(-20020, 'Database incarnation not set'); END IF; END setUntilResetlogs; -------------------------- -- Tempfile Translation -- -------------------------- ------------------------------ translateTempfile ------------------------------ PROCEDURE translateTempfile IS BEGIN IF (translateTempfile_c%ISOPEN) THEN validateState('translateTempfile_c'); -- raise the error END IF; OPEN translateTempfile_c; getTempfileCursor := 'translateTempfile_c'; END translateTempfile; ------------------------------ translateTempfile ------------------------------ PROCEDURE translateTempfile( fname IN varchar2) IS BEGIN IF (translateTempfileName_c%ISOPEN) THEN validateState('translateTempfileName_c'); -- raise the error END IF; OPEN translateTempfileName_c(fileName => fname); 
getTempfileCursor := 'translateTempfileName_c'; END translateTempfile; ------------------------------ translateTempFile ------------------------------ PROCEDURE translateTempfile( fno IN number) IS BEGIN IF (translateTempfileNumber_c%ISOPEN) THEN validateState('translateTempfileNumber_c'); -- raise the error END IF; OPEN translateTempfileNumber_c(fno => fno); getTempfileCursor := 'translateTempfileNumber_c'; END translateTempfile; --------------------------------- getTempfile -------------------------------- PROCEDURE getTempfile( tfRec OUT NOCOPY tfRec_t) IS eof boolean := FALSE; BEGIN IF (getTempfileCursor = 'translateTempfile_c') THEN FETCH translateTempfile_c INTO tfRec; IF (translateTempfile_c%NOTFOUND) THEN CLOSE translateTempfile_c; eof := TRUE; END IF; ELSIF (getTempfileCursor = 'translateTempfileName_c') THEN FETCH translateTempfileName_c INTO tfRec; IF (translateTempfileName_c%NOTFOUND) THEN CLOSE translateTempfileName_c; eof := TRUE; END IF; ELSIF (getTempfileCursor = 'translateTempfileNumber_c') THEN FETCH translateTempfileNumber_c INTO tfRec; IF (translateTempfileNumber_c%NOTFOUND) THEN CLOSE translateTempfileNumber_c; eof := TRUE; END IF; ELSE deb(DEB_EXIT, 'with error 20204'); raise_application_error(-20204, 'Translation not started'); END IF; IF (eof) THEN getTempfileCursor := NULL; RAISE no_data_found; -- signal end-of-fetch END IF; END getTempfile; -------------------------- translateDatafileCancel ------------------------- PROCEDURE translateDatafileCancel IS BEGIN IF (getDatafileCursor = 'translateDatafileName') THEN CLOSE translateDatafileName; ELSIF (getDatafileCursor = 'translateDatafileNumber') THEN CLOSE translateDatafileNumber; ELSIF (getDatafileCursor = 'translateDatafileCheckpoint') THEN CLOSE translateDatafileCheckpoint; END IF; getDatafileCursor := NULL; -- we closed it above getDatafileNoRows.error := NULL; -- clear for next time getDatafileLast.dfNumber := NULL; -- clear for next time END translateDatafileCancel; -------------------------- Num2DisplaySize --------------------------------- FUNCTION Num2DisplaySize(input_size IN NUMBER) return VARCHAR2 IS OneK number := 1024; OneM number := OneK * 1024; OneG number := OneM * 1024; OneT number := OneG * 1024; OneP number := OneT * 1024; BEGIN IF input_size IS NULL THEN return NULL; ELSE IF (input_size < OneM) THEN return to_char(input_size/OneK,'9990.09') ||'K'; ELSIF (input_size < OneG) THEN return to_char(input_size/OneM,'9990.09') ||'M'; ELSIF (input_size < OneT) THEN return to_char(input_size/OneG,'9990.09') ||'G'; ELSIF (input_size < OneP) THEN return to_char(input_size/OneT,'9990.09') ||'T'; ELSE return to_char(input_size/OneP,'9990.09') ||'P'; END IF; END IF; END Num2DisplaySize; -------------------------- Sec2DisplayTime ------------------------------- FUNCTION Sec2DisplayTime(input_secs IN NUMBER) return VARCHAR2 IS BEGIN IF input_secs IS NULL THEN return NULL; END IF; RETURN to_char(floor(input_secs/3600),'FM09')||':'|| to_char(floor(mod(input_secs,3600)/60),'FM09')||':'|| to_char(mod(input_secs,60),'FM09'); END Sec2DisplayTime; ------------------------ setArchivedLogRecord ----------------------------- PROCEDURE setArchivedLogRecord( thread# IN number ,sequence# IN number ,first IN boolean) IS seqTab sequenceTab_t; thrbck binary_integer; seqbck binary_integer; BEGIN IF first THEN tc_threadSeq.delete; END IF; -- thread# wrapped around 2G to keep things simple IF (thread# >= CONST2GVAL) THEN thrbck := CONST2GVAL - thread#; ELSE thrbck := thread#; END IF; -- sequence# wrapped around 2G to keep things 
simple
   IF (sequence# >= CONST2GVAL) THEN
      seqbck := CONST2GVAL - sequence#;
   ELSE
      seqbck := sequence#;
   END IF;
   IF NOT tc_threadSeq.exists(thrbck) THEN
      tc_threadSeq(thrbck) := seqTab;
   END IF;
   tc_threadseq(thrbck)(seqbck) := TRUE;
END setArchivedLogRecord;

------------------------ setCanHandleTransportableTbs -------------------------
PROCEDURE setCanHandleTransportableTbs(
      flag IN boolean) IS
BEGIN
   IF (flag) THEN
      deb(DEB_PRINT, 'canHandleTransportableTbs is set to TRUE');
      canHandleTransportableTbs := TRUE#;
   ELSE
      deb(DEB_PRINT, 'canHandleTransportableTbs is set to FALSE');
      canHandleTransportableTbs := FALSE#;
   END IF;
END setCanHandleTransportableTbs;

------------------------------ getRestorePoint -------------------------------
PROCEDURE getRestorePoint(
      name        IN varchar2
     ,rlgscn      OUT number
     ,rlgtime     OUT date
     ,scn         OUT number
     ,guaranteed  OUT number) IS
   rsp restore_point_c%ROWTYPE;
BEGIN
   deb(DEB_ENTER, 'getRestorePoint');
   IF (restore_point_c%isopen) THEN
      CLOSE restore_point_c;
   END IF;
   OPEN restore_point_c(name);
   FETCH restore_point_c INTO rsp;
   IF restore_point_c%NOTFOUND THEN
      rlgscn := NULL;
      rlgtime := NULL;
      scn := NULL;
      guaranteed := NULL;
   ELSE
      rlgscn := rsp.reset_scn;
      rlgtime := rsp.reset_time;
      scn := rsp.scn;
      IF (rsp.guaranteed = 'YES') THEN
         guaranteed := 1;
      ELSE
         guaranteed := 0;
      END IF;
   END IF;
   -- Verify that we have only one restore point with that name.
   FETCH restore_point_c INTO rsp;
   IF NOT restore_point_c%NOTFOUND THEN
      deb(DEB_EXIT, 'with error 20513');
      raise_application_error(-20513,
         'Restore point name is ambiguous, specify the SCN instead');
   END IF;
   CLOSE restore_point_c;
   deb(DEB_EXIT);
END getRestorePoint;

----------------------- listTranslateRestorePoint -----------------------------
PROCEDURE listTranslateRestorePoint(
      name IN varchar2) IS
BEGIN
   deb(DEB_ENTER, 'listTranslateRestorePoint');
   IF (restore_point_c%isopen) THEN
      CLOSE restore_point_c;
   END IF;
   deb(DEB_OPEN, 'restore_point_c');
   OPEN restore_point_c(name => name);
   deb(DEB_EXIT);
END listTranslateRestorePoint;

----------------------- listGetRestorePoint -----------------------------
PROCEDURE listGetRestorePoint(
      name     OUT varchar2
     ,scn      OUT number
     ,rsptime  OUT date
     ,cretime  OUT date
     ,rsptype  OUT varchar2) IS
   rsp restore_point_c%ROWTYPE;
BEGIN
   deb(DEB_ENTER, 'listGetRestorePoint');
   FETCH restore_point_c INTO rsp;
   IF (restore_point_c%NOTFOUND) THEN
      CLOSE restore_point_c;
      deb(DEB_EXIT, 'with no more records');
      RAISE no_data_found;
   END IF;
   name := rsp.name;
   scn := rsp.scn;
   rsptime := rsp.restore_point_time;
   cretime := rsp.creation_time;
   -- Get restore point type.
rsptype := ' '; IF rsp.preserved = 'YES' THEN rsptype := 'PRESERVED'; END IF; -- Guaranteed restore points are also preserved, so check guaranteed last IF rsp.guaranteed = 'YES' THEN rsptype := 'GUARANTEED'; END IF; deb(DEB_EXIT); END listGetRestorePoint; --------------------------- setDbidTransClause -------------------------------- PROCEDURE setDbidTransClause(dbid IN number) IS dbidbck binary_integer; BEGIN -- dbid wrapped around 2G to keep things simple IF (dbid >= CONST2GVAL) THEN dbidbck := CONST2GVAL - dbid; ELSE dbidbck := dbid; END IF; tc_dbid(dbidbck) := TRUE; END setDbidTransClause; ----------------------------- isTranslatedDbid ------------------------------- FUNCTION isTranslatedDbid(dbid IN NUMBER) RETURN NUMBER IS dbidbck binary_integer; BEGIN IF (tc_anydbid = TRUE#) THEN RETURN TRUE#; ELSE -- dbid wrapped around 2G to keep things simple IF (dbid >= CONST2GVAL) THEN dbidbck := CONST2GVAL - dbid; ELSE dbidbck := dbid; END IF; IF (tc_dbid.exists(dbidbck)) THEN RETURN TRUE#; END IF; END IF; RETURN FALSE#; END isTranslatedDbid; -- END_PUBCOMMON_RCVMAN_CODE --------------------------- -- Package Instantiation -- --------------------------- BEGIN -- initialize version list. -- change version_max_index when adding new values. versionList(1) := '08.00.04.00'; versionList(2) := '08.00.05.00'; versionList(3) := '08.01.03.00'; versionList(4) := '08.01.05.00'; versionList(5) := '08.01.06.00'; versionList(6) := '08.01.07.00'; versionList(7) := '09.00.00.00'; versionList(8) := '09.02.00.00'; versionList(9) := '10.01.00.00'; versionList(10):= '10.02.00.00'; versionList(11):= '10.02.00.01'; versionList(12):= '11.01.00.00'; versionList(13):= '11.01.00.01'; versionList(14):= '11.01.00.02'; versionList(15):= '11.01.00.03'; versionList(16):= '11.01.00.04'; versionList(17):= '11.01.00.05'; versionList(18):= '11.01.00.06'; versionList(19):= '11.01.00.07'; versionList(20):= '11.02.00.00'; versionList(21):= '11.02.00.01'; versionList(22):= '11.02.00.02'; versionList(23):= '11.02.00.03'; -- initialize version list iterator variables versionCounter := 1; versionMaxIndex := 23; -- must match highest index used above resetAll; -- init package variables to defaults END dbms_rcvman; >>> # ######################################################################### # Upgrade Recovery Catalog # # # # If new members are added here, the static array krmkupg_libunits must # # be updated with the new member names. # # # # This code applies all changes from 8.0.2 through the current version. # # krmk will ignore errors for columns and views that already exist, # # because that just means that the catalog wasn't so old. # # # # Some members from CREATE CATALOG are also used during upgrade. # # The order in which these members are executed is defined in krmkupg, # # and is important. For example, the rcver table is manipulated last # # so that the UPGRADE CATALOG command can be re-run if it fails # # midstream. 
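#
# For example, a member like upgcat_3 below,
#     alter table df add (clone_fname VARCHAR2(1024))
# can be re-run against a catalog that already has the column: krmk
# ignores the "column already exists" error and moves on to the next
# member, which is what makes re-running UPGRADE CATALOG after a
# midstream failure safe. (Illustrative note; krmkupg defines the
# authoritative execution order of these members.)
#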
# # # # ######################################################################### # 8.0.2 -> 8.0.3 define upgcat_1 <<< alter table ckp add (ckp_db_status VARCHAR2(7)) >>> define upgcat_2 <<< alter table ts add CONSTRAINT ts_u2 UNIQUE (dbinc_key, ts_name, create_scn) >>> define upgcat_3 <<< alter table df add (clone_fname VARCHAR2(1024)) >>> define upgcat_4 <<< alter table bdf add (completion_time date) >>> define upgcat_5 <<< alter table offr add (cf_create_time date) >>> define upgcat_6 <<< alter table offr drop constraint offr_u2 drop index >>> define upgcat_7 <<< alter table offr add constraint offr_u2 UNIQUE (dbinc_key, file#, create_scn, offline_scn, cf_create_time) >>> # 8.0.3 -> 8.0.4 define upgcat_9 <<< alter table df add (stop_scn number) >>> define upgcat_10 <<< alter table df add (read_only number) >>> define upgcat_11 <<< update df set stop_scn = null, read_only = 0 >>> define upgcat_12 <<< alter table df modify (read_only not null) >>> # 8.0.5 -> 8.1.3 define upgcat_15 <<< alter table bp add (media_pool number) >>> define upgcat_16 <<< alter table bp add (copy# number) >>> define upgcat_17 <<< update bp set copy# = 1 where copy# is null>>> define upgcat_18 <<< alter table bp modify (copy# not null) >>> define upgcat_19 <<< alter table bp drop constraint bp_c_status drop index >>> define upgcat_20 <<< alter table bp add constraint bp_c_status check (status in ('A','U','D','X')) >>> define upgcat_21 <<< alter table dbinc add (high_pc_recid number default 0) >>> define upgcat_22 <<< update dbinc set high_pc_recid = 0 >>> define upgcat_23 <<< alter table dbinc modify (high_pc_recid not null) >>> define upgcat_24 <<< alter table rlh add constraint rlh_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_25 <<< alter table al add constraint al_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_26 <<< alter table ccf add constraint ccf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_27 <<< alter table xcf add constraint xcf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_28 <<< alter table cdf add constraint cdf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_29 <<< alter table xdf add constraint xdf_f1 FOREIGN KEY (dbinc_key) REFERENCES dbinc ON DELETE CASCADE >>> define upgcat_30 <<< alter table bs modify (bs_recid NULL) >>> define upgcat_31 <<< alter table bs modify (bs_stamp NULL) >>> define upgcat_32 <<< alter table bs modify (bck_type NULL) >>> define upgcat_33 <<< alter table bs modify (incr_level NULL) >>> define upgcat_34 <<< alter table bs modify (status NULL) >>> define upgcat_35 <<< alter table bs drop constraint bs_u1 drop index >>> define upgcat_36 <<< alter table bs modify (start_time NULL) >>> define upgcat_37 <<< alter table bs modify (completion_time NULL) >>> define upgcat_38 <<< alter table df add (stop_time date) >>> # # 8.1.7 part: # define upgcat_39 <<< alter table bcf add (controlfile_type varchar2(1) NULL) >>> define upgcat_40 <<< alter table bcf add CONSTRAINT bcf_c_cf_type CHECK (controlfile_type in ('S','B')) >>> define upgcat_41 <<< alter table bs add (controlfile_included varchar2(7) NULL) >>> define upgcat_42 <<< alter table bs add CONSTRAINT bs_c_controlfile_included CHECK (controlfile_included in ('NONE','BACKUP','STANDBY')) >>> define upgcat_43 <<< alter table ccf add (controlfile_type varchar2(1) NULL) >>> define upgcat_44 <<< alter table ccf add CONSTRAINT ccf_c_cf_type CHECK (controlfile_type in 
('S','B')) >>> define upgcat_45 <<< alter table xcf add (controlfile_type varchar2(1) NULL) >>> define upgcat_46 <<< alter table xcf add CONSTRAINT xcf_c_cf_type CHECK (controlfile_type in ('S','B')) >>> define upgcat_47 <<< alter table al drop constraint al_u1 drop index >>> define upgcat_48 <<< alter table al add (is_standby varchar2(1) NULL) >>> define upgcat_49 <<< alter table al add constraint al_u1 UNIQUE (dbinc_key, al_recid, al_stamp, is_standby) >>> define upgcat_50 <<< alter table al add constraint al_c_is_standby CHECK (is_standby in ('Y','N')) >>> define upgcat_51 <<< alter table bs add (input_file_scan_only varchar2(3)) >>> # # 9iR1 part: # define upgcat_52 <<< alter table al drop constraint al_c_status drop index >>> define upgcat_53 <<< alter table al add constraint al_c_status check (status in ('A','U','D','X')) >>> define upgcat_54 <<< alter table ccf drop constraint ccf_c_status drop index >>> define upgcat_55 <<< alter table ccf add constraint ccf_c_status check (status in ('A','U','D','X')) >>> define upgcat_56 <<< alter table cdf drop constraint cdf_c_status drop index >>> define upgcat_57 <<< alter table cdf add constraint cdf_c_status check (status in ('A','U','D','X')) >>> # RMAN configuration stuff define upgcat_58 <<< alter table db add (high_conf_recid number) >>> define upgcat_59 <<< alter table df add (rfile# number) >>> define upgcat_60 <<< alter table al add (dictionary_begin varchar2(3)) >>> define upgcat_61 <<< alter table al add (dictionary_end varchar2(3)) >>> define upgcat_62 <<< alter table ts add (included_in_database_backup varchar2(3) default 'YES') >>> # keep stuff define upgcat_63 <<< alter table bs add (keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_64 <<< alter table bs add (keep_until DATE NULL) >>> define upgcat_65 <<< alter table xdf add (keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_66 <<< alter table xdf add (keep_until DATE NULL) >>> define upgcat_67 <<< alter table xcf add (keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_68 <<< alter table xcf add (keep_until DATE NULL) >>> define upgcat_69 <<< alter table cdf add (keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_70 <<< alter table cdf add (keep_until DATE NULL) >>> define upgcat_71 <<< alter table ccf add (keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_72 <<< alter table ccf add (keep_until DATE NULL) >>> # Misc stuff define upgcat_73 <<< alter table bcf add (autobackup_date date) >>> define upgcat_74 <<< alter table bcf add (autobackup_sequence number) >>> define upgcat_75 <<< alter table ts add (included_in_database_backup varchar2(3) default 'YES') >>> define upgcat_76 <<< alter table bcf add (blocks number) >>> define upgcat_77 <<< alter table ckp drop constraint ckp_f2 drop index >>> define upgcat_78 <<< alter table ckp drop column prev_ckp_key >>> # # 9iR2 part: # define upgcat_79 <<< alter table cdf add (scanned varchar2(1)) >>> define upgcat_80 <<< alter table bcb add (corruption_type varchar2(9)) >>> define upgcat_81 <<< alter table ccb add (corruption_type varchar2(9)) >>> define upgcat_82 <<< alter table db add (last_kccdivts number default 0) >>> define upgcat_99 <<< -- This plsql procedure fixes the copy# if needed declare cursor broken_pieces is select bs_key, piece# from bp where copy# = 1 group by bs_key, piece# having count(piece#) > 1; cursor fix_piece(bskey number, pno number) is select bp_key from bp where bs_key = fix_piece.bskey and piece# = fix_piece.pno for update of copy#; rows_cnt number; rows_per_iteration 
      number := 500;
begin
   loop
      rows_cnt := 0;
      for bp in broken_pieces loop
         for fp in fix_piece(bp.bs_key, bp.piece#) loop
            rows_cnt := fix_piece%rowcount;
            update bp set copy# = rows_cnt where current of fix_piece;
         end loop;
         rows_cnt := broken_pieces%rowcount;
         exit when rows_cnt = rows_per_iteration;
      end loop;
      commit;
      exit when rows_cnt < rows_per_iteration;
   end loop;
end; >>>

# SPFILE backup stuff
define upgcat_100 <<< alter table dbinc add (high_bsf_recid NUMBER DEFAULT 0 NOT NULL) >>>

# 9iR2 has bug-2280512 which prevents replacing packages with pipelined
# functions. So, for 9iR2 we need to drop dbms_rcvman!!!
define upgcat_101 <<< drop package dbms_rcvman >>>

#
# 10gR1 part:
#

# Bigfile Tablespace flag
define upgcat_102 <<< alter table ts add (bigfile varchar2(3) default 'NO' NOT NULL) >>>
define upgcat_103 <<< alter table dbinc add(dbinc_status VARCHAR2(8) DEFAULT 'ORPHAN' NOT NULL) >>>
define upgcat_104 <<< alter table dbinc add constraint dbinc_status CHECK(dbinc_status in ('CURRENT', 'PARENT', 'ORPHAN')) >>>
#
#define upgcat_105
#<<< UNUSED >>>
#
define upgcat_106 <<< alter table al add (is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL) >>>
define upgcat_107 <<< alter table cdf add (is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL) >>>
define upgcat_108 <<< alter table ccf add (is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL) >>>
define upgcat_109 <<< alter table bp add (is_recovery_dest_file VARCHAR2(3) DEFAULT 'NO' NOT NULL) >>>
define upgcat_110 <<< alter table bp add (bytes NUMBER DEFAULT NULL) >>>

# RMAN Status resync stuff
define upgcat_111 <<< alter table dbinc add (high_rsr_recid NUMBER DEFAULT 0 NOT NULL) >>>
define upgcat_112 <<< alter table bp add (rsr_key number) >>>
define upgcat_113 <<< alter table cdf add (rsr_key number) >>>
define upgcat_114 <<< alter table ccf add (rsr_key number) >>>
define upgcat_115 <<< alter table xdf add (rsr_key number) >>>
define upgcat_116 <<< alter table xcf add (rsr_key number) >>>

# fix incompatibilities between create/upgrade schema
define upgcat_117 <<< update cdf set scanned='N' where scanned is null >>>
define upgcat_118 <<< alter table cdf modify(scanned varchar2(1) DEFAULT 'N') >>>
define upgcat_119 <<< alter table cdf modify(scanned varchar2(1) NOT NULL) >>>
define upgcat_120 <<< alter table db modify(last_kccdivts NUMBER DEFAULT 0) >>>
define upgcat_121 <<< update ts set included_in_database_backup='YES' where included_in_database_backup is null >>>
define upgcat_122 <<< alter table ts modify(included_in_database_backup DEFAULT 'YES') >>>
define upgcat_123 <<< alter table ts modify(included_in_database_backup NOT NULL) >>>
define upgcat_124 <<< alter table bp add (compressed varchar2(3) DEFAULT 'NO') >>>
define upgcat_125 <<< alter table al add (compressed varchar2(3) DEFAULT 'NO') >>>
define upgcat_126 <<< alter table scr add (scr_comment varchar2(255)) >>>
define upgcat_127 <<< alter table scr modify db_key null >>>
define upgcat_128 <<< update bs set keep_options = 0 where keep_options is null >>>
define upgcat_129 <<< alter table bs modify(keep_options NUMBER DEFAULT 0 NOT NULL) >>>
define upgcat_130 <<< update xdf set keep_options = 0 where keep_options is null >>>
define upgcat_131 <<< alter table xdf modify(keep_options NUMBER DEFAULT 0 NOT NULL) >>>
define upgcat_132 <<< update xcf set keep_options = 0 where keep_options is null >>>
define upgcat_133 <<< alter table xcf modify(keep_options NUMBER DEFAULT 0 NOT NULL) >>>
define upgcat_134 <<< update cdf set keep_options = 0 where keep_options is null >>>
define upgcat_135 <<< alter table cdf modify(keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_136 <<< update ccf set keep_options = 0 where keep_options is null >>> define upgcat_137 <<< alter table ccf modify(keep_options NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_138 <<< alter table conf add (db_unique_name VARCHAR2(512) DEFAULT NULL, cleanup VARCHAR2(3) DEFAULT 'YES') >>> define upgcat_139 <<< alter table conf drop constraint conf_p1 drop index >>> define upgcat_140 <<< alter table conf add constraint conf_f1 FOREIGN KEY(db_key) REFERENCES db ON DELETE CASCADE >>> define upgcat_141 <<< alter table bdf add (blocks_read number) >>> define upgcat_142 <<< update bdf set blocks_read = datafile_blocks where blocks_read is null >>> define upgcat_143 <<< alter table bdf modify(blocks_read NOT NULL) >>> define bp_i_rsr <<< CREATE INDEX bp_i_rsr on bp(rsr_key) &tablespace& >>> define cdf_i_rsr <<< CREATE INDEX cdf_i_rsr on cdf(rsr_key) &tablespace& >>> define ccf_i_rsr <<< CREATE INDEX ccf_i_rsr on ccf(rsr_key) &tablespace& >>> define xdf_i_rsr <<< CREATE INDEX xdf_i_rsr on xdf(rsr_key) &tablespace& >>> define xcf_i_rsr <<< CREATE INDEX xcf_i_rsr on xcf(rsr_key) &tablespace& >>> define xal_i_rsr <<< CREATE INDEX xal_i_rsr on xal(rsr_key) &tablespace& >>> define tsatt_i_sck <<< CREATE INDEX tsatt_i_sck on tsatt(start_ckp_key) &tablespace& >>> define tsatt_i_eck <<< CREATE INDEX tsatt_i_eck on tsatt(end_ckp_key) &tablespace& >>> define ckp_i_dbinc <<< CREATE INDEX ckp_i_dbinc on ckp(dbinc_key) &tablespace& >>> define rsr_i_dbinc <<< CREATE INDEX rsr_i_dbinc on rsr(dbinc_key) &tablespace& >>> define rsr_i_stamp <<< CREATE INDEX rsr_i_stamp on rsr(rsr_sstamp, rsr_srecid) &tablespace& >>> define rout_i_db <<< CREATE INDEX rout_i_db on rout(db_key) &tablespace& >>> define rout_i_rsr <<< CREATE INDEX rout_i_rsr on rout(rsr_key) &tablespace& >>> define rout_i_skey <<< CREATE INDEX rout_i_skey on rout(rout_skey) &tablespace& >>> # last_kccdivts not used for incarnation resync from 10G onwards. define upgcat_144 <<< alter table db add (high_ic_recid NUMBER DEFAULT 0) >>> # #10gR2 upgrade part: # define upgcat_145 <<< alter table bs add (block_size NUMBER DEFAULT NULL) >>> define upgcat_146 <<< -- This plsql procedure fills in the bs block_size that were previously -- null. -- NOTE!! NOTE!! - we can't fix for spfile only backuppiece and pieces -- that were created using < 10gR2 rman exec with 10gR2 and higher schema. 
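-- Conceptually (illustrative restatement only), each affected backup set
-- gets its block size back from whichever piece-level table still knows
-- it, i.e. roughly:
--    select max(block_size) from bdf where bs_key = :key
-- tried first against bdf, then brl, then bcf by getBSBlockSize below;
-- sets whose size cannot be determined are simply left with a null
-- block_size.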
-- declare cursor null_bs is select bs_key from bs where block_size is null; calblksize number; function getBSBlockSize(bs_key number) return number is blksize number := 0; begin begin select max(bdf.block_size) into blksize from bdf, bs where bdf.bs_key = bs.bs_key and bs.bs_key = getBSBlockSize.bs_key; return blksize; exception when no_data_found then null; end; begin select max(brl.block_size) into blksize from brl, bs where brl.bs_key = bs.bs_key and bs.bs_key = getBSBlockSize.bs_key; return blksize; exception when no_data_found then null; end; begin select max(bcf.block_size) into blksize from bcf, bs where bcf.bs_key = bs.bs_key and bs.bs_key = getBSBlockSize.bs_key; return blksize; exception when no_data_found then null; end; return null; end getBSBlockSize; begin for bs_rec in null_bs loop calblksize := getBSBlockSize(bs_rec.bs_key); if (calblksize != 0 and calblksize is not null) then update bs set block_size = calblksize where bs_key = bs_rec.bs_key; end if; end loop; end; >>> define upgcat_147 <<< alter table rsr add (rsr_ibytes NUMBER, rsr_obytes NUMBER, rsr_optimized VARCHAR2(3), rsr_otype VARCHAR2(80), rsr_srecid number, rsr_sstamp number, rsr_odevtype VARCHAR2(17)) >>> define upgcat_148 <<< alter table node add (high_rout_stamp NUMBER default 0, inst_startup_stamp NUMBER default 0) >>> define upgcat_149 <<< alter table ccf add (blocks NUMBER DEFAULT NULL) >>> define upgcat_150 <<< alter table xcf add (blocks NUMBER DEFAULT NULL) >>> define upgcat_151 <<< alter table cdf add (create_time DATE DEFAULT NULL, marked_corrupt NUMBER DEFAULT NULL) >>> define upgcat_152 <<< alter table bdf add (create_time DATE DEFAULT NULL, marked_corrupt NUMBER DEFAULT NULL, used_chg_track VARCHAR2(1) DEFAULT 'N', used_optim VARCHAR2(1) DEFAULT 'N') >>> define upgcat_153 <<< alter table xdf add (create_time DATE DEFAULT NULL) >>> define upgcat_154 <<< alter table fb add (oldest_flashback_time DATE DEFAULT NULL) >>> define upgcat_155 <<< alter table al add (creator VARCHAR2(7) DEFAULT NULL) >>> define upgcat_156 <<< alter table dbinc add (high_tf_recid NUMBER DEFAULT 0 NOT NULL) >>> define upgcat_157 <<< alter table ts add (temporary varchar2(3) default 'NO' NOT NULL) >>> define upgcat_158 <<< -- This plsql procedure removes node and its configuration entry if there -- is no database_role added to node table and there are more than one -- db_unique_name registered for a database. -- declare is_db_role number; no_conf number; cursor node_c is select db_key, count(*) no_db_unique_name from node group by db_key; cursor db_c is select db_key from db; begin select count(*) into is_db_role from user_tab_columns where table_name = 'NODE' and column_name = 'DATABASE_ROLE'; if is_db_role = 0 then for noderec in node_c loop if (noderec.no_db_unique_name > 1) then delete from node where node.db_key = noderec.db_key; delete from conf where conf.db_key = noderec.db_key; update db set db.high_conf_recid = 0 where db.db_key = noderec.db_key; end if; end loop; else -- Make sure that high water mark of conf is 0 when there is no -- configuration. This can happen because of a bug# 4719372 -- in upgrade code. If there is a mismatch, reset high water mark -- that would trigger a full resync during next command. 
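-- In summary (illustrative): when NODE has no DATABASE_ROLE column
-- (pre-10gR2 schema), a database registered under more than one
-- db_unique_name loses its node/conf rows and db.high_conf_recid is
-- zeroed; when the column exists, any database with zero conf rows has
-- node.high_conf_recid and db.high_conf_recid zeroed. Either way, the
-- zeroed high water mark forces a configuration resync on the next
-- command.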
define upgcat_147 <<< alter table rsr add (rsr_ibytes NUMBER, rsr_obytes NUMBER, rsr_optimized VARCHAR2(3), rsr_otype VARCHAR2(80), rsr_srecid number, rsr_sstamp number, rsr_odevtype VARCHAR2(17)) >>>
define upgcat_148 <<< alter table node add (high_rout_stamp NUMBER default 0, inst_startup_stamp NUMBER default 0) >>>
define upgcat_149 <<< alter table ccf add (blocks NUMBER DEFAULT NULL) >>>
define upgcat_150 <<< alter table xcf add (blocks NUMBER DEFAULT NULL) >>>
define upgcat_151 <<< alter table cdf add (create_time DATE DEFAULT NULL, marked_corrupt NUMBER DEFAULT NULL) >>>
define upgcat_152 <<< alter table bdf add (create_time DATE DEFAULT NULL, marked_corrupt NUMBER DEFAULT NULL, used_chg_track VARCHAR2(1) DEFAULT 'N', used_optim VARCHAR2(1) DEFAULT 'N') >>>
define upgcat_153 <<< alter table xdf add (create_time DATE DEFAULT NULL) >>>
define upgcat_154 <<< alter table fb add (oldest_flashback_time DATE DEFAULT NULL) >>>
define upgcat_155 <<< alter table al add (creator VARCHAR2(7) DEFAULT NULL) >>>
define upgcat_156 <<< alter table dbinc add (high_tf_recid NUMBER DEFAULT 0 NOT NULL) >>>
define upgcat_157 <<< alter table ts add (temporary varchar2(3) default 'NO' NOT NULL) >>>
define upgcat_158 <<<
-- This plsql procedure removes the node and configuration entries for a
-- database if the database_role column has not yet been added to the node
-- table and there is more than one db_unique_name registered for that
-- database.
--
declare
  is_db_role number;
  no_conf number;
  cursor node_c is select db_key, count(*) no_db_unique_name from node group by db_key;
  cursor db_c is select db_key from db;
begin
  select count(*) into is_db_role from user_tab_columns
   where table_name = 'NODE' and column_name = 'DATABASE_ROLE';
  if is_db_role = 0 then
    for noderec in node_c loop
      if (noderec.no_db_unique_name > 1) then
        delete from node where node.db_key = noderec.db_key;
        delete from conf where conf.db_key = noderec.db_key;
        update db set db.high_conf_recid = 0 where db.db_key = noderec.db_key;
      end if;
    end loop;
  else
    -- Make sure that the high water mark of conf is 0 when there is no
    -- configuration. This can happen because of a bug# 4719372 in the
    -- upgrade code. If there is a mismatch, reset the high water mark,
    -- which will trigger a full resync during the next command.
    for dbrec in db_c loop
      select count(*) into no_conf from conf where conf.db_key = dbrec.db_key;
      if no_conf = 0 then
        update node set node.high_conf_recid = 0 where node.db_key = dbrec.db_key;
        update db set db.high_conf_recid = 0 where db.db_key = dbrec.db_key;
      end if;
    end loop;
  end if;
end; >>>
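# For illustration, the kind of mismatch that upgcat_158 resets can be
# spotted by hand with a query like the following (not part of the upgrade):
# a database whose conf high water mark is set although no configuration
# rows remain.
#
#   select db.db_key from db
#    where db.high_conf_recid > 0
#      and not exists (select 1 from conf where conf.db_key = db.db_key);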
define upgcat_159 <<<
-- Change all db_name and db_unique_name values to upper case
begin
  update dbinc set dbinc.db_name = upper(dbinc.db_name);
  update conf set conf.db_unique_name = upper(conf.db_unique_name);
  update node set node.db_unique_name = upper(node.db_unique_name);
  update fb set fb.db_unique_name = upper(fb.db_unique_name);
end; >>>
define upgcat_160 <<< alter table node add (database_role varchar2(7) default 'PRIMARY' NOT NULL) >>>
define upgcat_161 <<< alter table node add constraint node_u1 unique(db_key, db_unique_name) >>>
define upgcat_162 <<< alter table ts add (encrypt_in_backup varchar2(3)) >>>
#
# 11gR1 upgrade part:
#
define upgcat_163 <<< alter table al add (terminal VARCHAR2(3) DEFAULT 'NO') >>>
define upgcat_164 <<< alter table xal add (terminal VARCHAR2(3) DEFAULT 'NO') >>>
define upgcat_165 <<< alter table brl add (terminal VARCHAR2(3) DEFAULT 'NO') >>>
define upgcat_166 <<< alter table cdf drop CONSTRAINT cdf_c_status drop index >>>
define upgcat_167 <<< alter table cdf add CONSTRAINT cdf_c_status CHECK (status in ('A','U','D','X','F')) >>>
define upgcat_168 <<< alter table rlh drop constraint rlh_u2 drop index >>>
define upgcat_add_scrl_dbkey <<< alter table scrl add (db_key NUMBER) >>>
define upgcat_populate_scrl_dbkey <<< update scrl set db_key = (select db_key from scr where scr.scr_key = scrl.scr_key) >>>
define upgcat_bug_4754328 <<<
-- This plsql code is added to resolve bug 4754328. Basically, we had
-- incorrectly marked the temporary tablespace entries as dropped because of
-- this bug. We fix those entries by deleting them if the fix is not already
-- installed and all of the temporary tablespaces are marked as dropped.
--
declare
  cursor dbinc_c is select dbinc_key from dbinc for update;
  tempts_cnt number;
  tempts_fixed number;
begin
  select count(*) into tempts_fixed from user_procedures
   where object_name='DBMS_RCVCAT' and procedure_name='TEMPFILETORESYNC';
  if tempts_fixed = 0 then
    for dbincrec in dbinc_c loop
      select count(*) into tempts_cnt from ts
       where temporary = 'YES' and drop_scn is null
         and dbinc_key = dbincrec.dbinc_key;
      if (tempts_cnt = 0) then
        delete from ts
         where temporary = 'YES' and dbinc_key = dbincrec.dbinc_key;
        -- high_tf_recid is in node, added below, so a resync will happen
      end if;
    end loop;
  end if;
end; >>>
define upgcat_df_add_fdbid <<< alter table df add (foreign_dbid number default 0 not null) >>>
define upgcat_df_add_fcrescn <<< alter table df add (foreign_create_scn number default 0 not null) >>>
define upgcat_df_add_fcretim <<< alter table df add (foreign_create_time date) >>>
define upgcat_df_add_pronly <<< alter table df add (plugged_readonly varchar2(3) default 'NO' not null) >>>
define upgcat_df_add_plus <<< alter table df add (plugin_scn number default 0 not null) >>>
define upgcat_df_add_prlgscn <<< alter table df add (plugin_reset_scn number default 0 not null) >>>
define upgcat_df_add_prlgtim <<< alter table df add (plugin_reset_time date) >>>
define upgcat_df_drop_df_p_tts <<< alter table df drop constraint df_p drop index >>>
define upgcat_df_add_df_p_tts <<< alter table df add constraint df_p primary key (dbinc_key, file#, create_scn, plugin_scn) >>>
define upgcat_bdf_add_fdbid <<< alter table bdf add (foreign_dbid number default 0 not null) >>>
define upgcat_bdf_add_pronly <<< alter table bdf add (plugged_readonly varchar2(3) default 'NO' not null) >>>
define upgcat_bdf_add_plus <<< alter table bdf add (plugin_scn number default 0 not null) >>>
define upgcat_bdf_add_prlgscn <<< alter table bdf add (plugin_reset_scn number default 0 not null) >>>
define upgcat_bdf_add_prlgtim <<< alter table bdf add (plugin_reset_time date) >>>
define upgcat_bdf_add_secsize <<< alter table bdf add (section_size number) >>>
define upgcat_cdf_add_fdbid <<< alter table cdf add (foreign_dbid number default 0 not null) >>>
define upgcat_cdf_add_pronly <<< alter table cdf add (plugged_readonly varchar2(3) default 'NO' not null) >>>
define upgcat_cdf_add_plus <<< alter table cdf add (plugin_scn number default 0 not null) >>>
define upgcat_cdf_add_prlgscn <<< alter table cdf add (plugin_reset_scn number default 0 not null) >>>
define upgcat_cdf_add_prlgtim <<< alter table cdf add (plugin_reset_time date) >>>
define upgcat_xdf_add_fdbid <<< alter table xdf add (foreign_dbid number default 0 not null) >>>
define upgcat_xdf_add_pronly <<< alter table xdf add (plugged_readonly varchar2(3) default 'NO' not null) >>>
define upgcat_xdf_add_plus <<< alter table xdf add (plugin_scn number default 0 not null) >>>
define upgcat_xdf_add_prlgscn <<< alter table xdf add (plugin_reset_scn number default 0 not null) >>>
define upgcat_xdf_add_prlgtim <<< alter table xdf add (plugin_reset_time date) >>>
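# Why plugin_scn becomes part of the df primary key above: after a
# transportable tablespace is plugged in, a plugged-in datafile can collide
# with an existing file on (dbinc_key, file#, create_scn), and only the
# plugin SCN tells them apart. A hand-run probe for such pairs (illustration
# only):
#
#   select dbinc_key, file#, create_scn, count(*)
#     from df
#    group by dbinc_key, file#, create_scn
#   having count(*) > 1;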
define add_siteaware_columns_to_node <<< alter table node add
  (site_key number default 0 not null, last_kccdivts NUMBER DEFAULT 0,
   high_ic_recid NUMBER DEFAULT 0, cf_create_time DATE,
   dbinc_key NUMBER DEFAULT 0 NOT NULL, ckp_scn NUMBER DEFAULT 0 NOT NULL,
   full_ckp_cf_seq NUMBER DEFAULT 0 NOT NULL,
   job_ckp_cf_seq NUMBER DEFAULT 0 NOT NULL, high_ts_recid NUMBER,
   high_df_recid NUMBER, high_rt_recid NUMBER, high_orl_recid NUMBER,
   high_offr_recid NUMBER DEFAULT 0 NOT NULL,
   high_rlh_recid NUMBER DEFAULT 0 NOT NULL,
   high_al_recid NUMBER DEFAULT 0 NOT NULL,
   high_bs_recid NUMBER DEFAULT 0 NOT NULL,
   high_bp_recid NUMBER DEFAULT 0 NOT NULL,
   high_bdf_recid NUMBER DEFAULT 0 NOT NULL,
   high_cdf_recid NUMBER DEFAULT 0 NOT NULL,
   high_brl_recid NUMBER DEFAULT 0 NOT NULL,
   high_bcb_recid NUMBER DEFAULT 0 NOT NULL,
   high_ccb_recid NUMBER DEFAULT 0 NOT NULL,
   high_do_recid NUMBER DEFAULT 0 NOT NULL,
   high_pc_recid NUMBER DEFAULT 0 NOT NULL,
   high_bsf_recid NUMBER DEFAULT 0 NOT NULL,
   high_rsr_recid NUMBER DEFAULT 0 NOT NULL,
   high_tf_recid NUMBER DEFAULT 0 NOT NULL,
   high_grsp_recid NUMBER DEFAULT 0 NOT NULL,
   high_nrsp_recid NUMBER DEFAULT 0 NOT NULL,
   high_bcr_recid NUMBER DEFAULT 0 NOT NULL,
   low_bcr_recid NUMBER DEFAULT 0 NOT NULL,
   bcr_in_use VARCHAR2(3) DEFAULT 'NO' NOT NULL) >>>
# go thru the node table and assign site_key to un-assigned rows.
# There must be at least one row per database in the node table corresponding
# to the 'PRIMARY' database role. If there is no such row, add one. Also, add
# a row to the node table for each distinct db_unique_name known to the conf
# table.
define update_site_key_in_node <<<
declare
  cursor node_c is select site_key from node where site_key = 0 for update;
  cursor site_fromconf is
    select db_key, db_unique_name,
           count(*) over (partition by db_key) num_of_sites
      from (select distinct db_key, db_unique_name from conf
             where db_unique_name is not null
               and db_key not in (select db_key from node));
  cursor db_not_known_c is
    select db_key from db
     where not exists (select * from node where db_key = db.db_key);
  cursor db_with_no_primary_c is
    select db_key, database_role from node
     where db_key not in (select db_key from node where database_role='PRIMARY')
       for update of node.database_role;
  last_db_key number;
begin
  -- assign site_key to existing entries in node table
  for noderec in node_c loop
    update node set site_key = rman_seq.nextval where current of node_c;
  end loop;
  -- get database sites from the conf table that are not already in the node
  -- table and add a row to the node table
  for siterec in site_fromconf loop
    begin
      insert into node(db_key, db_unique_name, site_key, database_role,
                       force_resync2cf)
        values (siterec.db_key, siterec.db_unique_name, rman_seq.nextval,
                decode(siterec.num_of_sites, 1, 'PRIMARY', 'STANDBY'), 'YES');
    exception
      when dup_val_on_index then null;
    end;
  end loop;
  -- add a row to the node table for each database that has no rows there yet
  for dbrec in db_not_known_c loop
    INSERT INTO node(db_key, database_role, site_key, force_resync2cf)
      VALUES(dbrec.db_key, 'PRIMARY', rman_seq.nextval, 'NO');
  end loop;
  -- promote one row to PRIMARY for each database that still has no known
  -- primary database
  for db_rec in db_with_no_primary_c loop
    if last_db_key is null or last_db_key <> db_rec.db_key then
      update node set database_role='PRIMARY'
       where current of db_with_no_primary_c;
      last_db_key := db_rec.db_key;
    end if;
  end loop;
  commit;
end; >>>
define add_node_p_constraint <<< alter table node add CONSTRAINT node_p PRIMARY KEY (site_key) >>>
define add_site_key_check_constraint <<< alter table node add CONSTRAINT check_site_key CHECK (site_key > 0) >>>
define add_site_key_to_conf <<< alter table conf add (site_key number default 0 not null) >>>
define assign_site_key_in_conf <<<
-- go thru the conf table and assign site_key to un-assigned rows.
-- Note that there cannot be two rows in the node table with the same
-- db_unique_name
declare
  cursor conf_c is
    select db_key, db_unique_name, site_key from conf
     where db_unique_name is not null for update;
  conf_site_key number;
begin
  for confrec in conf_c loop
    select site_key into conf_site_key from node
     where confrec.db_unique_name = node.db_unique_name
       and confrec.db_key = node.db_key;
    update conf set site_key = conf_site_key where current of conf_c;
  end loop;
  commit;
end; >>>
define add_site_key_to_al <<< alter table al add (site_key number) >>>
define add_al_f2_constraint <<< alter table al add CONSTRAINT al_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
define add_site_key_to_bp <<< alter table bp add (site_key number) >>>
define add_encrypted_and_backed_by_osb_to_bp <<< alter table bp add (encrypted VARCHAR2(1) DEFAULT 'N', backed_by_osb VARCHAR2(1) DEFAULT 'N') >>>
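# After update_site_key_in_node, every registered database should have at
# least one PRIMARY row in node. A hand-run check of that invariant
# (illustration only):
#
#   select db_key from db
#    where db_key not in
#          (select db_key from node where database_role = 'PRIMARY');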
define add_bp_f2_constraint <<< alter table bp add CONSTRAINT bp_f2 FOREIGN KEY (site_key) REFERENCES node >>>
define add_site_key_to_ccf <<< alter table ccf add (site_key number) >>>
define add_ccf_f2_constraint <<< alter table ccf add CONSTRAINT ccf_f2 FOREIGN KEY (site_key) REFERENCES node >>>
define add_site_key_to_xcf <<< alter table xcf add (site_key number) >>>
define add_xcf_f2_constraint <<< alter table xcf add CONSTRAINT xcf_f2 FOREIGN KEY (site_key) REFERENCES node >>>
define add_site_key_to_cdf <<< alter table cdf add (site_key number) >>>
define add_cdf_f2_constraint <<< alter table cdf add CONSTRAINT cdf_f3 FOREIGN KEY (site_key) REFERENCES node >>>
define add_site_key_to_xdf <<< alter table xdf add (site_key number) >>>
define add_xdf_f2_constraint <<< alter table xdf add CONSTRAINT xdf_f3 FOREIGN KEY (site_key) REFERENCES node >>>
define add_site_key_to_xal <<< alter table xal add (site_key number) >>>
define add_xal_f2_constraint <<< alter table xal add CONSTRAINT xal_f2 FOREIGN KEY (site_key) REFERENCES node >>>
define add_keep_to_xal <<< alter table xal add (keep_options number default 0 not null, keep_until date) >>>
define add_site_key_to_rsr <<< alter table rsr add (site_key number) >>>
define add_rsr_osb_allocated_to_rsr <<< alter table rsr add (rsr_osb_allocated varchar2(1)) >>>
define drop_rsr_u1_constraint <<< alter table rsr drop CONSTRAINT rsr_u1 drop index >>>
define add_rsr_u2_constraint <<< alter table rsr add CONSTRAINT rsr_u2 UNIQUE (dbinc_key, rsr_recid, rsr_stamp, site_key) >>>
define add_rsr_f2_constraint <<< alter table rsr add CONSTRAINT rsr_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
define add_site_key_to_bs <<< alter table bs add (site_key number) >>>
define add_bs_f2_constraint <<< alter table bs add CONSTRAINT bs_f2 FOREIGN KEY (site_key) REFERENCES node >>>
define drop_bcf_u1_constraint <<< alter table bcf drop CONSTRAINT bcf_u1 drop index >>>
define drop_bsf_u1_constraint <<< alter table bsf drop CONSTRAINT bsf_u1 drop index >>>
define drop_bsf_i_bs_key_index <<< drop index bsf_i_bs_key >>>
define add_bsf_u2_constraint <<< alter table bsf add CONSTRAINT bsf_u2 UNIQUE (bs_key) >>>
define drop_bdf_u1_constraint <<< alter table bdf drop CONSTRAINT bdf_u1 drop index >>>
define add_bdf_u2_constraint <<< alter table bdf add CONSTRAINT bdf_u2 UNIQUE (bs_key, file#) >>>
define drop_brl_u1_constraint <<< alter table brl drop CONSTRAINT brl_u1 drop index >>>
define add_brl_u2_constraint <<< alter table brl add CONSTRAINT brl_u2 UNIQUE (bs_key, thread#, sequence#) >>>
define drop_offr_u1_constraint <<< alter table offr drop CONSTRAINT offr_u1 drop index >>>
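# The bdf_u2/brl_u2/bsf_u2 constraints above can only be created once
# duplicate rows are gone; the delete_dup_brl_bsf_bdf_rows libunit defined
# later in this file is the cleanup step. For example, duplicates that would
# block brl_u2 can be spotted by hand with (illustration only):
#
#   select bs_key, thread#, sequence#, count(*)
#     from brl
#    group by bs_key, thread#, sequence#
#   having count(*) > 1;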
define add_df_key_blocks_to_df <<< alter table df add (df_key NUMBER, blocks NUMBER) >>>
define add_df_key_site_key_to_dfatt <<< alter table dfatt add (df_key NUMBER, site_key NUMBER) >>>
define drop_dfatt_f3_constraint <<< alter table dfatt drop CONSTRAINT dfatt_f3 drop index >>>
# get blocks from dfatt into the df table, and convert data from dfatt into
# the site_dfatt table
define make_df_dfatt_siteaware <<<
DECLARE
  TYPE cur_typ IS REF CURSOR;
  dfatt_c cur_typ;
  dfattrec df%ROWTYPE;
  dfrec df%ROWTYPE;
  sql_stmt VARCHAR2(200);
  distinct_files NUMBER;
  distinct_fnames NUMBER;
  rid VARCHAR2(18);
  nextval NUMBER;
BEGIN
  -- delete all dfatt rows that have non-null end_ckp_key values as they are
  -- not used for anything.
  EXECUTE IMMEDIATE 'delete dfatt where end_ckp_key is not null and ' ||
                    'end_ckp_key <> 0';
  OPEN dfatt_c FOR 'SELECT dbinc_key, file#, create_scn, blocks, rowid ' ||
                   ' FROM dfatt ' ||
                   'WHERE end_ckp_key IS NULL FOR UPDATE';
  -- Assign a df_key to data files and dfatt rows
  LOOP
    FETCH dfatt_c INTO dfattrec.dbinc_key, dfattrec.file#,
                       dfattrec.create_scn, dfattrec.blocks, rid;
    EXIT WHEN dfatt_c%NOTFOUND;
    SELECT * INTO dfrec FROM df
     WHERE dbinc_key = dfattrec.dbinc_key AND file# = dfattrec.file#
       AND create_scn = dfattrec.create_scn;
    IF dfrec.df_key IS NULL THEN
      SELECT rman_seq.nextval INTO nextval FROM dual;
      UPDATE df SET df_key = nextval, blocks = dfattrec.blocks
       WHERE file# = dfrec.file# AND create_scn = dfrec.create_scn
         AND ts# = dfrec.ts#
         AND dbinc_key IN
             (SELECT dbinc_key FROM dbinc
               WHERE db_key = (SELECT db_key FROM dbinc
                                WHERE dbinc_key = dfattrec.dbinc_key));
      IF SQL%ROWCOUNT = 0 THEN
        RAISE_APPLICATION_ERROR(-20000,'not updated any data file');
      END IF;
      EXECUTE IMMEDIATE 'UPDATE dfatt SET df_key = rman_seq.currval ' ||
                        'WHERE ROWID = :1' USING rid;
      IF SQL%ROWCOUNT <> 1 THEN
        RAISE_APPLICATION_ERROR(-20001,'not updated dfatt for file');
      END IF;
    ELSE
      EXECUTE IMMEDIATE 'DELETE dfatt WHERE ROWID = :1' USING rid;
    END IF;
  END LOOP;
  CLOSE dfatt_c;
  SELECT COUNT(*) INTO distinct_files
    FROM (SELECT DISTINCT db.db_key, file#, create_scn, ts#
            FROM df, dbinc, db
           WHERE df.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = db.db_key);
  OPEN dfatt_c FOR 'SELECT COUNT(*) FROM dfatt';
  FETCH dfatt_c INTO distinct_fnames;
  CLOSE dfatt_c;
  IF distinct_files <> distinct_fnames THEN
    RAISE_APPLICATION_ERROR(-20003,'distinct file count mismatch '||
                            distinct_files || ',' || distinct_fnames);
  END IF;
  -- make this libmem idempotent by changing the end_ckp_key values...
  EXECUTE IMMEDIATE 'UPDATE dfatt SET end_ckp_key = 0';
  COMMIT;
END; >>>
define modify_df_key_not_null <<< alter table df modify(df_key NOT NULL) >>>
define drop_dfatt_f1_constraint <<< alter table dfatt drop CONSTRAINT dfatt_f1 drop index >>>
define drop_dfatt_f2_constraint <<< alter table dfatt drop CONSTRAINT dfatt_f2 drop index >>>
define drop_dfatt_u1_constraint <<< alter table dfatt drop CONSTRAINT dfatt_u1 drop index >>>
define drop_dbinc_key_from_dfatt <<< alter table dfatt drop column dbinc_key >>>
define drop_end_ckp_key_from_dfatt <<< alter table dfatt drop column end_ckp_key >>>
define drop_start_ckp_key_from_dfatt <<< alter table dfatt drop column start_ckp_key >>>
define "drop_file#_from_dfatt" <<< alter table dfatt drop column file# >>>
define drop_create_scn_from_dfatt <<< alter table dfatt drop column create_scn >>>
define drop_blocks_from_dfatt <<< alter table dfatt drop column blocks >>>
define rename_dfatt_to_site_dfatt <<< alter table dfatt rename to site_dfatt >>>
define modify_df_key_in_site_dfatt_not_null <<< alter table site_dfatt modify(df_key not null) >>>
define add_site_dfatt_p_constraint <<< alter table site_dfatt add CONSTRAINT site_dfatt_p PRIMARY KEY (df_key, site_key) >>>
define add_site_dfatt_f2_constraint <<< alter table site_dfatt add CONSTRAINT site_dfatt_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
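# Net effect of the dfatt conversion above: attributes common to all sites
# stay in df (keyed by the new df_key), while each site's view of the file
# lives in site_dfatt. Assuming the surviving filename column in site_dfatt
# is fname, the per-site names of one file could be listed by hand with
# (illustration only):
#
#   select n.db_unique_name, sd.fname
#     from site_dfatt sd, node n
#    where sd.site_key = n.site_key
#      and sd.df_key = :some_df_key;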
site_dfatt.site_key; begin for site_dfatt_rec in site_dfatt_c loop update site_dfatt set site_key = site_dfatt_rec.site_key where current of site_dfatt_c; end loop; commit; end; >>> define modify_site_key_in_site_dfatt_not_null <<< alter table site_dfatt modify(site_key NOT NULL) >>> define add_siteaware_columns_to_tf <<< alter table tf add (tf_key NUMBER, blocks NUMBER, autoextend VARCHAR2(3), max_size NUMBER, next_size NUMBER) >>> define add_tf_key_site_key_to_tfatt <<< alter table tfatt add (tf_key NUMBER, site_key NUMBER) >>> define drop_tfatt_f3_constraint <<< alter table tfatt drop CONSTRAINT tfatt_f3 drop index >>> # get blocks, autoextend, max_size and next_size from tfatt into tf table, # and convert data from tfatt into site_tfatt table define make_tf_tfatt_siteaware <<< DECLARE TYPE cur_typ IS REF CURSOR; tfatt_c cur_typ; tfattrec tf%ROWTYPE; tfrec tf%ROWTYPE; tfrec_blocks NUMBER; tfrec_autoextend VARCHAR2(3); tfrec_max_size NUMBER; tfrec_next_size NUMBER; sql_stmt VARCHAR2(200); distinct_files NUMBER; distinct_fnames NUMBER; rid VARCHAR2(18); nextval NUMBER; BEGIN -- delete all tfatt rows that have non-nul end_ckp_key values as they are -- not used for anything. EXECUTE IMMEDIATE 'delete tfatt where end_ckp_key is not null and ' || 'end_ckp_key <> 0'; OPEN tfatt_c FOR 'SELECT dbinc_key, file#, create_scn, blocks, autoextend,' || ' max_size, next_size, rowid ' || ' FROM tfatt ' || 'WHERE end_ckp_key IS NULL FOR UPDATE'; -- Assign a tf_key to data files and tfatt rows LOOP FETCH tfatt_c INTO tfattrec.dbinc_key, tfattrec.file#, tfattrec.create_scn, tfrec_blocks, tfrec_autoextend, tfrec_max_size, tfrec_next_size, rid; EXIT WHEN tfatt_c%NOTFOUND; SELECT * INTO tfrec FROM tf WHERE dbinc_key = tfattrec.dbinc_key AND file# = tfattrec.file# AND create_scn= tfattrec.create_scn; IF tfrec.tf_key IS NULL THEN SELECT rman_seq.nextval INTO nextval FROM dual; EXECUTE IMMEDIATE 'UPDATE tf SET tf_key = :1, ' || ' blocks = :2, ' || ' autoextend = :3, ' || ' max_size = :4, ' || ' next_size = :5 ' || 'WHERE file# = :6 ' || 'AND create_scn = :7 ' || 'AND (create_time = :8 ' || ' OR create_time IS NULL AND :9 IS NULL) ' || 'AND ts# = :10 ' || 'AND rfile# = :11 ' || 'AND dbinc_key IN ' || ' (SELECT dbinc_key FROM dbinc ' || ' WHERE db_key = (SELECT db_key FROM dbinc ' || ' WHERE dbinc_key = :12))' USING nextval, tfrec_blocks, tfrec_autoextend, tfrec_max_size, tfrec_next_size, tfrec.file#, tfrec.create_scn, tfrec.create_time, tfrec.create_time, tfrec.ts#, tfrec.rfile#, tfattrec.dbinc_key; IF SQL%ROWCOUNT = 0 THEN RAISE_APPLICATION_ERROR(-20004,'not updated any temp file'); END IF; EXECUTE IMMEDIATE 'UPDATE tfatt SET tf_key = rman_seq.currval ' || 'WHERE ROWID = :1' USING rid; IF SQL%ROWCOUNT <> 1 THEN RAISE_APPLICATION_ERROR(-20005,'not updated tfatt for file'); END IF; ELSE EXECUTE IMMEDIATE 'DELETE tfatt WHERE ROWID = :1' USING rid; END IF; END LOOP; CLOSE tfatt_c; SELECT COUNT(*) INTO distinct_files FROM (SELECT DISTINCT db.db_key, file#, create_scn, create_time, ts#, rfile# FROM tf, dbinc, db WHERE tf.dbinc_key = dbinc.dbinc_key AND dbinc.db_key = db.db_key); OPEN tfatt_c FOR 'SELECT COUNT(*) FROM tfatt'; FETCH tfatt_c INTO distinct_fnames; CLOSE tfatt_c; IF distinct_files <> distinct_fnames THEN RAISE_APPLICATION_ERROR(-20006,'distinct file count mismatch '|| distinct_files || ',' || distinct_fnames); END IF; -- make this libmem, idempotent by changing end_ckp_value... 
define modify_tf_key_not_null_in_tf <<< alter table tf modify(tf_key NOT NULL) >>>
define drop_tfatt_f1_constraint <<< alter table tfatt drop CONSTRAINT tfatt_f1 drop index >>>
define drop_tfatt_f2_constraint <<< alter table tfatt drop CONSTRAINT tfatt_f2 drop index >>>
define drop_tfatt_u1_constraint <<< alter table tfatt drop CONSTRAINT tfatt_u1 drop index >>>
define drop_dbinc_key_from_tfatt <<< alter table tfatt drop column dbinc_key >>>
define drop_end_ckp_key_from_tfatt <<< alter table tfatt drop column end_ckp_key >>>
define drop_start_ckp_key_from_tfatt <<< alter table tfatt drop column start_ckp_key >>>
define "drop_file#_from_tfatt" <<< alter table tfatt drop column file# >>>
define drop_create_scn_from_tfatt <<< alter table tfatt drop column create_scn >>>
define drop_blocks_from_tfatt <<< alter table tfatt drop column blocks >>>
define drop_autoextend_from_tfatt <<< alter table tfatt drop column autoextend >>>
define drop_max_size_from_tfatt <<< alter table tfatt drop column max_size >>>
define drop_next_size_from_tfatt <<< alter table tfatt drop column next_size >>>
define rename_tfatt_to_site_tfatt <<< alter table tfatt rename to site_tfatt >>>
define modify_tf_key_in_site_tfatt_not_null <<< alter table site_tfatt modify(tf_key not null) >>>
define add_site_tfatt_p_constraint <<< alter table site_tfatt add CONSTRAINT site_tfatt_p PRIMARY KEY (tf_key, site_key) >>>
define add_site_tfatt_f2_constraint <<< alter table site_tfatt add CONSTRAINT site_tfatt_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
# Assign currently un-assigned rows in site_tfatt to the primary site
define assign_site_key_in_site_tfatt <<<
declare
  cursor site_tfatt_c is
    select site_tfatt.tf_key, node.site_key, site_tfatt.site_key nullkey
      from site_tfatt, tf, dbinc, node
     where site_tfatt.site_key is null
       and node.database_role = 'PRIMARY'
       and site_tfatt.tf_key = tf.tf_key
       and tf.dbinc_key = dbinc.dbinc_key
       and dbinc.db_key = node.db_key
       for update of site_tfatt.site_key;
begin
  for site_tfatt_rec in site_tfatt_c loop
    update site_tfatt set site_key = site_tfatt_rec.site_key
     where current of site_tfatt_c;
  end loop;
end; >>>
define modify_site_key_in_site_tfatt_not_null <<< alter table site_tfatt modify(site_key NOT NULL) >>>
define add_site_key_to_ckp <<< alter table ckp add (site_key number default 0 not null) >>>
# go thru the ckp table and assign site_key to un-assigned rows
define update_site_key_in_ckp <<<
declare
  cursor ckp_c is
    select ckp.site_key unassigned, node.site_key
      from ckp, dbinc, node
     where ckp.site_key = 0
       and ckp.dbinc_key = dbinc.dbinc_key
       and dbinc.db_key = node.db_key
       and node.database_role = 'PRIMARY'
       for update of ckp.site_key;
begin
  for ckprec in ckp_c loop
    update ckp set site_key = ckprec.site_key where current of ckp_c;
  end loop;
  commit;
end; >>>
define add_ckp_f3_constraint <<< alter table ckp add constraint ckp_f3 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
define drop_high_df_recid_from_ckp <<< alter table ckp drop column high_df_recid >>>
define drop_high_conf_recid_from_db <<< alter table db drop column high_conf_recid >>>
define drop_last_kccdivts_from_db <<< alter table db drop column last_kccdivts >>>
define drop_high_ic_recid_from_db <<< alter table db drop column high_ic_recid >>>
define drop_cf_create_time_from_dbinc <<< alter table dbinc drop column cf_create_time >>>
define drop_ckp_scn_from_dbinc <<< alter table dbinc drop column ckp_scn >>>
define drop_full_ckp_cf_seq_from_dbinc <<< alter table dbinc drop column full_ckp_cf_seq >>>
define drop_job_ckp_cf_seq_from_dbinc <<< alter table dbinc drop column job_ckp_cf_seq >>>
define drop_high_ts_recid_from_dbinc <<< alter table dbinc drop column high_ts_recid >>>
define drop_high_df_recid_from_dbinc <<< alter table dbinc drop column high_df_recid >>>
define drop_high_rt_recid_from_dbinc <<< alter table dbinc drop column high_rt_recid >>>
define drop_high_orl_recid_from_dbinc <<< alter table dbinc drop column high_orl_recid >>>
define drop_high_offr_recid_from_dbinc <<< alter table dbinc drop column high_offr_recid >>>
define drop_high_rlh_recid_from_dbinc <<< alter table dbinc drop column high_rlh_recid >>>
define drop_high_al_recid_from_dbinc <<< alter table dbinc drop column high_al_recid >>>
define drop_high_bs_recid_from_dbinc <<< alter table dbinc drop column high_bs_recid >>>
define drop_high_bp_recid_from_dbinc <<< alter table dbinc drop column high_bp_recid >>>
define drop_high_bdf_recid_from_dbinc <<< alter table dbinc drop column high_bdf_recid >>>
define drop_high_cdf_recid_from_dbinc <<< alter table dbinc drop column high_cdf_recid >>>
define drop_high_brl_recid_from_dbinc <<< alter table dbinc drop column high_brl_recid >>>
define drop_high_bcb_recid_from_dbinc <<< alter table dbinc drop column high_bcb_recid >>>
define drop_high_ccb_recid_from_dbinc <<< alter table dbinc drop column high_ccb_recid >>>
define drop_high_do_recid_from_dbinc <<< alter table dbinc drop column high_do_recid >>>
define drop_high_pc_recid_from_dbinc <<< alter table dbinc drop column high_pc_recid >>>
define drop_high_bsf_recid_from_dbinc <<< alter table dbinc drop column high_bsf_recid >>>
define drop_high_rsr_recid_from_dbinc <<< alter table dbinc drop column high_rsr_recid >>>
define drop_high_tf_recid_from_dbinc <<< alter table dbinc drop column high_tf_recid >>>
define drop_high_grsp_recid_from_dbinc <<< alter table dbinc drop column high_grsp_recid >>>
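# The column drops above complete the move of the resync high water marks
# from db/dbinc into the per-site node table (added earlier by
# add_siteaware_columns_to_node), so each db_unique_name tracks its own
# resync progress. A hand-run look at the per-site marks (illustration only):
#
#   select db_unique_name, database_role, high_bs_recid, high_al_recid
#     from node
#    where db_key = :db_key;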
define add_type_to_orl <<< alter table orl add (type varchar2(7) default 'ONLINE') >>>
define add_bytes_to_orl <<< alter table orl add (bytes number default NULL) >>>
define add_site_key_to_orl <<< alter table orl add (site_key number) >>>
define add_orl_f2_constraint <<< alter table orl add CONSTRAINT orl_f2 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
# go thru the orl table and assign site_key to un-assigned rows
define update_site_key_in_orl <<<
declare
  cursor orl_c is
    select orl.site_key unassigned, node.site_key
      from orl, dbinc, node
     where orl.site_key is null
       and orl.dbinc_key = dbinc.dbinc_key
       and dbinc.db_key = node.db_key
       and node.database_role = 'PRIMARY'
       for update of orl.site_key;
begin
  for orlrec in orl_c loop
    update orl set site_key = orlrec.site_key where current of orl_c;
  end loop;
  commit;
end; >>>
define add_db_unique_name_to_bsf <<< alter table bsf add (db_unique_name varchar2(30)) >>>
define drop_site_key_from_bsf <<< alter table bsf drop column site_key >>>
define upgcat_bs_multi_section <<< alter table bs add (multi_section varchar2(1)) >>>
define add_site_key_to_grsp <<< alter table grsp add (site_key number) >>>
define add_creation_time_to_grsp <<< alter table grsp add (creation_time date default NULL) >>>
define add_rsptime_to_grsp <<< alter table grsp add (rsptime date default NULL) >>>
define add_guaranteed_to_grsp <<< alter table grsp add (guaranteed varchar2(3) default 'YES') >>>
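# The grsp (guaranteed restore point) rows gain per-site ownership and
# bookkeeping columns above. Once update_site_key_in_grsp below has run, the
# restore points known at one site could be listed by hand with
# (illustration only):
#
#   select rspname, guaranteed, creation_time, rsptime
#     from grsp
#    where site_key = :site_key;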
# go thru the grsp table and assign site_key to un-assigned rows
define update_site_key_in_grsp <<<
declare
  cursor grsp_c is
    select grsp.site_key unassigned, node.site_key
      from grsp, dbinc, node
     where grsp.site_key is null
       and grsp.dbinc_key = dbinc.dbinc_key
       and dbinc.db_key = node.db_key
       and node.database_role = 'PRIMARY'
       for update of grsp.site_key;
begin
  for grsprec in grsp_c loop
    update grsp set site_key = grsprec.site_key where current of grsp_c;
  end loop;
  commit;
end; >>>
define check_grsp_u1_constraint <<<
declare
  cursor grsp_c is
    select site_key, rspname from grsp
     order by site_key, rspname
       for update of grsp.site_key;
  prev_site_key number;
  prev_rspname grsp.rspname%type;
begin
  delete from grsp where site_key is null;
  for grsprec in grsp_c loop
    if (prev_site_key = grsprec.site_key and
        prev_rspname = grsprec.rspname) then
      delete grsp where current of grsp_c;
    else
      prev_site_key := grsprec.site_key;
      prev_rspname := grsprec.rspname;
    end if;
  end loop;
end; >>>
define add_grsp_u1_constraint <<< alter table grsp add constraint grsp_u1 UNIQUE(site_key, rspname) >>>
define add_grsp_u3_constraint <<< alter table grsp add constraint grsp_u3 FOREIGN KEY (site_key) REFERENCES node ON DELETE CASCADE >>>
define upgcat_rcver_constraint <<< alter table rcver add constraint rcver_version_unique unique(version) >>>
define drop_tf_u2_constraint <<< alter table tf drop CONSTRAINT tf_u2 drop index >>>
define drop_df_u2_constraint <<< alter table df drop CONSTRAINT df_u2 drop index >>>
# use the same template to delete all duplicate brl/bdf/bsf entries
define delete_dup_brl_bsf_bdf_rows <<<
declare
  cursor brl_c is
    select bs_key, thread#, sequence#,
           lead(sequence#, 1) over (partition by bs_key, thread#
                                        order by sequence#) nextseq#
      from brl for update of brl.bs_key;
  cursor bsf_c is
    select bs_key, lead(bs_key, 1) over (order by bs_key) next_bs_key
      from bsf for update of bsf.bs_key;
  cursor bdf_c is
    select bs_key, file#,
           lead(file#, 1) over (partition by bs_key order by file#) nextfile#
      from bdf for update of bdf.bs_key;
begin
  -- bug 5971763, delete brl entries with timestamp values 0
  delete brl where brl_stamp = 0;
  -- bug 6253529, delete duplicate brl entries within backup set
  for brlrec in brl_c loop
    if (brlrec.sequence# = brlrec.nextseq#) then
      delete brl where current of brl_c;
    end if;
  end loop;
  -- bug 6929736, delete duplicate bsf and bdf rows
  for bsfrec in bsf_c loop
    if bsfrec.bs_key = bsfrec.next_bs_key then
      delete bsf where current of bsf_c;
    end if;
  end loop;
  for bdfrec in bdf_c loop
    if (bdfrec.file# = bdfrec.nextfile#) then
      delete bdf where current of bdf_c;
    end if;
  end loop;
  commit;
end; >>>
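# The lead() analytic in delete_dup_brl_bsf_bdf_rows compares each row with
# its successor in the same partition, so all but the last row of each run
# of duplicates is deleted. A minimal sketch of the pattern against a
# hypothetical table t(k, v) (illustration only):
#
#   declare
#     cursor c is
#       select v, lead(v, 1) over (partition by k order by v) nextv
#         from t for update of t.k;
#   begin
#     for r in c loop
#       if r.v = r.nextv then
#         delete t where current of c;
#       end if;
#     end loop;
#   end;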
# The following columns drop_scn, drop_time, blocks, autoextend, max_size and
# next_size are added to site_tfatt; they are moved from tf.
define add_some_cols_to_site_tfatt <<< alter table site_tfatt add (drop_scn number, drop_time date, blocks number, autoextend varchar2(3), max_size number, next_size number) >>>
define populate_some_cols_in_site_tfatt <<<
DECLARE
  TYPE cur_typ IS REF CURSOR;
  site_tfatt_c cur_typ;
  site_tfatt_rec site_tfatt%ROWTYPE;
  site_tfatt_rid VARCHAR2(18);
  tf_drop_scn NUMBER;
  tf_drop_time DATE;
  tf_blocks NUMBER;
  tf_autoextend VARCHAR2(3);
  tf_max_size NUMBER;
  tf_next_size NUMBER;
BEGIN
  OPEN site_tfatt_c FOR
    'SELECT site_tfatt.drop_scn, site_tfatt.drop_time, ' ||
    ' site_tfatt.blocks, site_tfatt.autoextend, ' ||
    ' site_tfatt.max_size, site_tfatt.next_size, ' ||
    ' tf.drop_scn tf_drop_scn, tf.drop_time tf_drop_time, ' ||
    ' tf.blocks tf_blocks, tf.autoextend tf_autoextend, ' ||
    ' tf.max_size tf_max_size, tf.next_size tf_next_size, ' ||
    ' site_tfatt.ROWID site_tfatt_rowid ' ||
    'FROM site_tfatt, tf ' ||
    'WHERE site_tfatt.tf_key = tf.tf_key ' ||
    'FOR UPDATE OF site_tfatt.drop_scn, site_tfatt.drop_time, '||
    ' site_tfatt.blocks, site_tfatt.autoextend, '||
    ' site_tfatt.max_size, site_tfatt.next_size';
  LOOP
    FETCH site_tfatt_c INTO site_tfatt_rec.drop_scn, site_tfatt_rec.drop_time,
                            site_tfatt_rec.blocks, site_tfatt_rec.autoextend,
                            site_tfatt_rec.max_size, site_tfatt_rec.next_size,
                            tf_drop_scn, tf_drop_time, tf_blocks,
                            tf_autoextend, tf_max_size, tf_next_size,
                            site_tfatt_rid;
    EXIT WHEN site_tfatt_c%NOTFOUND;
    EXECUTE IMMEDIATE
      'update site_tfatt set drop_scn = :1, drop_time = :2, ' ||
      ' blocks = :3, autoextend = :4, max_size = :5, next_size = :6 ' ||
      ' where ROWID = :7'
      USING tf_drop_scn, tf_drop_time, tf_blocks, tf_autoextend,
            tf_max_size, tf_next_size, site_tfatt_rid;
  END LOOP;
  COMMIT;
END; >>>
define drop_tf_u1 <<< alter table tf drop constraint tf_u1 drop index >>>
define drop_drop_scn_from_tf <<< alter table tf drop column drop_scn >>>
define drop_drop_time_from_tf <<< alter table tf drop column drop_time >>>
define drop_blocks_from_tf <<< alter table tf drop column blocks >>>
define drop_autoextend_from_tf <<< alter table tf drop column autoextend >>>
define drop_max_size_from_tf <<< alter table tf drop column max_size >>>
define drop_next_size_from_tf <<< alter table tf drop column next_size >>>
# The two libunits below, upgcat_strt_0 and upgcat_strt_1, are used to lock
# the upgrade catalog process until it is finished by this version of RMAN.
# This allows old versions of RMAN to detect that a catalog upgrade is in
# progress, and prevents them from running the upgrade again on a
# half-upgraded catalog database schema.
# bug 6745130 - detect incomplete upgraded catalog
define upgcat_strt_0 <<<
create or replace package dbms_rcvcat authid current_user is
  function getPackageVersion return varchar2;
  function getCatalogVersion return varchar2;
  -- Used to identify if the upgrade of the catalog schema was not complete
  UPGRADE_COMPLETED CONSTANT number := 0;
end; >>>
define upgcat_strt_1 <<<
create or replace package body dbms_rcvcat is
  function getPackageVersion return varchar2 is
  begin
    return '11.02.00.03';
  end;
  function getCatalogVersion return varchar2 is
  begin
    return '11.02.00.03';
  end;
end; >>>
define add_create_thread_size_to_df <<< ALTER TABLE df ADD (create_thread NUMBER, create_size NUMBER) >>>
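# Per the comment above, upgcat_strt_0/upgcat_strt_1 recreate dbms_rcvcat as
# a stub at the start of the upgrade, so an older RMAN that probes the
# package can tell a half-upgraded schema from a complete one (the exact
# detection logic lives in the RMAN client, not in this file). By hand, the
# version the stub reports can be read with (illustration only):
#
#   select dbms_rcvcat.getCatalogVersion from dual;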
# Remove null db_unique_name rows from the NODE table for a database which
# already has non-null db_unique_name rows.
# bug 7709787
define remove_null_db_unique_name_rows_from_node <<<
declare
  cursor db_with_null_db_unique_name_c is
    select db_key, db_unique_name,
           count(*) over (partition by db_key) num_of_sites
      from node
     where db_key in (select db_key from node where db_unique_name is null)
       for update of node.db_key;
begin
  for db_with_null_db_unique_name in db_with_null_db_unique_name_c loop
    if db_with_null_db_unique_name.num_of_sites > 1 and
       db_with_null_db_unique_name.db_unique_name is null then
      delete node where current of db_with_null_db_unique_name_c;
    end if;
  end loop;
  commit;
end; >>>
define drop_constraint_status_rt <<< alter table rt drop CONSTRAINT rt_c_status drop index >>>
define recreate_constraint_status_rt <<< alter table rt add CONSTRAINT rt_c1_status CHECK (status in ('D','E','O','I')) >>>
# Set the site_key value for all other tables where only one site_key is
# known for a database. This avoids running CROSSCHECK to associate the
# site_key with the database, and will also help when a new Data Guard
# system is introduced in the environment.
# bug 5408094
define set_site_key_for_single_site_dbs <<<
declare
  cursor onesite is
    select * from (select site_key, db_key,
                          count(db_key) over (partition by db_key) num_sites
                     from node where site_key > 0)
     where num_sites = 1;
begin
  for onesite_row in onesite loop
    update ckp set site_key = onesite_row.site_key
     where site_key is null
       and ckp_key in (select ckp_key from ckp, dbinc
                        where dbinc.dbinc_key = ckp.dbinc_key
                          and dbinc.db_key = onesite_row.db_key);
    update site_dfatt set site_key = onesite_row.site_key
     where site_key is null
       and df_key in (select df_key from df, dbinc
                       where dbinc.dbinc_key = df.dbinc_key
                         and dbinc.db_key = onesite_row.db_key);
    update site_tfatt set site_key = onesite_row.site_key
     where site_key is null
       and tf_key in (select tf_key from tf, dbinc
                       where dbinc.dbinc_key = tf.dbinc_key
                         and dbinc.db_key = onesite_row.db_key);
    update orl set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update al set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update bs set site_key = onesite_row.site_key
     where site_key is null and db_key = onesite_row.db_key;
    update bp set site_key = onesite_row.site_key
     where site_key is null
       and bs_key in (select bs_key from bs
                       where bs.db_key = onesite_row.db_key);
    update ccf set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update xcf set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update cdf set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update xdf set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update xal set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update rsr set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update grsp set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update nrsp set site_key = onesite_row.site_key
     where site_key is null
       and dbinc_key in (select dbinc_key from dbinc
                          where db_key = onesite_row.db_key);
    update bcr set site_key = onesite_row.site_key
     where site_key is null
       and df_key in (select df_key from df, dbinc
                       where dbinc.dbinc_key = df.dbinc_key
                         and dbinc.db_key = onesite_row.db_key);
    -- commit individually for each database to reduce the size of the undo
    -- for one transaction
    commit;
  end loop;
end; >>>
define add_plugin_scn_to_ts <<< alter table ts add (plugin_scn number default 0 not null) >>>
define add_plugin_scn_to_tsatt <<< alter table tsatt add (plugin_scn number default 0 not null) >>>
define upgcat_drop_tsatt_f1 <<< alter table tsatt drop constraint tsatt_f1 drop index >>>
define upgcat_drop_tf_f1 <<< alter table tf drop constraint tf_f1 drop index >>>
define upgcat_drop_df_f1 <<< alter table df drop constraint df_f1 drop index >>>
define upgcat_ts_drop_ts_p_tts <<< alter table ts drop constraint ts_p drop index >>>
define upgcat_ts_add_ts_p1_tts <<< alter table ts add constraint ts_p1 primary key (dbinc_key, ts#, create_scn, plugin_scn) >>>
define upgcat_drop_ts_u2 <<< alter table ts drop constraint ts_u2 drop index >>>
define upgcat_add_ts_u3 <<< alter table ts add CONSTRAINT ts_u3 UNIQUE (dbinc_key, ts_name, create_scn, plugin_scn) >>>
define upgcat_drop_tsatt_u1 <<< alter table tsatt drop constraint tsatt_u1 drop index >>>
define upgcat_add_tsatt_u2 <<< alter table tsatt add CONSTRAINT tsatt_u2 UNIQUE (dbinc_key, ts#, create_scn, plugin_scn, end_ckp_key) >>>
define upgcat_add_plugin_scn_to_tf <<< alter table tf add (plugin_scn number default 0 not null) >>>
define upgcat_add_tf_c1_plugin_scn <<< alter table tf add CONSTRAINT tf_c1_plugin_scn CHECK (plugin_scn = 0) >>>
define upgcat_add_tf_f2 <<< alter table tf add CONSTRAINT tf_f2 FOREIGN KEY (dbinc_key, ts#, ts_create_scn, plugin_scn) REFERENCES ts ON DELETE CASCADE >>>
define upgcat_fix_plugin_scn_in_ts_and_tsatt <<<
begin
  for plugin_df_row in (select distinct dbinc_key, ts#, ts_create_scn,
                               plugin_scn
                          from df where plugin_scn <> 0) loop
    update ts set plugin_scn = plugin_df_row.plugin_scn
     where ts.dbinc_key = plugin_df_row.dbinc_key
       and ts.ts# = plugin_df_row.ts#
       and ts.create_scn = plugin_df_row.ts_create_scn
       and ts.plugin_scn = 0;
    update tsatt set plugin_scn = plugin_df_row.plugin_scn
     where tsatt.dbinc_key = plugin_df_row.dbinc_key
       and tsatt.ts# = plugin_df_row.ts#
       and tsatt.create_scn = plugin_df_row.ts_create_scn
       and tsatt.plugin_scn = 0;
    commit;
  end loop;
  -- delete orphan rows in the DF table, which do not belong to any existing
  -- tablespace, so that we can re-enable the foreign key constraints.
  for orphan_ts_in_df in (select distinct dbinc_key, ts#, ts_create_scn,
                                 plugin_scn
                            from df
                          minus
                          select distinct dbinc_key, ts#, create_scn,
                                 plugin_scn
                            from ts) loop
    delete df
     where dbinc_key = orphan_ts_in_df.dbinc_key
       and ts# = orphan_ts_in_df.ts#
       and ts_create_scn = orphan_ts_in_df.ts_create_scn
       and plugin_scn = orphan_ts_in_df.plugin_scn;
    commit;
  end loop;
end; >>>
define upgcat_add_tsatt_f4 <<< alter table tsatt add CONSTRAINT tsatt_f4 FOREIGN KEY (dbinc_key, ts#, create_scn, plugin_scn) REFERENCES ts ON DELETE CASCADE INITIALLY DEFERRED >>>
define upgcat_add_df_f2 <<< alter table df add CONSTRAINT df_f2 FOREIGN KEY (dbinc_key, ts#, ts_create_scn, plugin_scn) REFERENCES ts ON DELETE CASCADE >>>
###########################################################################
# Drop Recovery Catalog                                                   #
#                                                                         #
# If new members are added here, the static array krmkdrp_libunits must   #
# be updated with the new member names.                                   #
#                                                                         #
# If any database objects are removed between one version and another     #
# of the recovery catalog (i.e. a catalog upgrade drops an object), do    #
# NOT remove the corresponding drpcat libmem for that object, because     #
# the DROP CATALOG command must remove all remnants of any version of     #
# the catalog.                                                            #
###########################################################################
define drpcat_1 <<< DROP SEQUENCE rman_seq >>>
define drpcat_2 <<< DROP TABLE rcver >>>
define drpcat_3 <<< DROP TABLE db cascade constraints >>>
define drpcat_4 <<< DROP TABLE conf cascade constraints >>>
define drpcat_5 <<< DROP TABLE dbinc cascade constraints >>>
define drpcat_6 <<< DROP TABLE ckp cascade constraints >>>
define drpcat_7 <<< DROP TABLE ts cascade constraints >>>
define drpcat_8 <<< DROP TABLE tsatt cascade constraints >>>
define drpcat_9 <<< DROP TABLE df cascade constraints >>>
define drpcat_10 <<< DROP TABLE site_dfatt cascade constraints >>>
define drpcat_11 <<< DROP TABLE offr cascade constraints >>>
define drpcat_12 <<< DROP TABLE rr cascade constraints >>>
define drpcat_13 <<< DROP TABLE rt cascade constraints >>>
define drpcat_14 <<< DROP TABLE orl cascade constraints >>>
define drpcat_15 <<< DROP TABLE rlh cascade constraints >>>
define drpcat_16 <<< DROP TABLE bs cascade constraints >>>
define drpcat_17 <<< DROP TABLE bp cascade constraints >>>
define drpcat_18 <<< DROP TABLE bcf cascade constraints >>>
define drpcat_19 <<< DROP TABLE ccf cascade constraints >>>
define drpcat_20 <<< DROP TABLE xcf cascade constraints >>>
define drpcat_21 <<< DROP TABLE bdf cascade constraints >>>
define drpcat_22 <<< DROP TABLE cdf cascade constraints >>>
define drpcat_23 <<< DROP TABLE xdf cascade constraints >>>
define drpcat_24 <<< DROP TABLE brl cascade constraints >>>
define drpcat_25 <<< DROP TABLE al cascade constraints >>>
define drpcat_26 <<< DROP TABLE bcb cascade constraints >>>
define drpcat_27 <<< DROP TABLE ccb cascade constraints >>>
define drpcat_28 <<< DROP TABLE scr cascade constraints >>>
define drpcat_29 <<< DROP TABLE scrl cascade constraints >>>
define drpcat_30 <<< DROP VIEW rc_database >>>
define drpcat_31 <<< DROP VIEW rc_database_incarnation >>>
define drpcat_32 <<< DROP VIEW rc_resync >>>
define drpcat_33 <<< DROP VIEW rc_checkpoint >>>
define drpcat_34 <<< DROP VIEW rc_tablespace >>>
define drpcat_35 <<< DROP VIEW rc_datafile >>>
define drpcat_36 <<< DROP VIEW rc_redo_thread >>>
define drpcat_37 <<< DROP VIEW rc_redo_log >>>
define drpcat_38 <<< DROP VIEW rc_log_history >>>
define drpcat_39 <<< DROP VIEW rc_archived_log >>>
define drpcat_40 <<< DROP VIEW rc_backup_set >>>
define drpcat_41 <<< DROP VIEW rc_backup_piece >>>
define drpcat_42 <<< DROP VIEW rc_backup_datafile >>>
define drpcat_43 <<< DROP VIEW rc_backup_controlfile >>>
define drpcat_44 <<< DROP VIEW rc_datafile_copy >>>
define drpcat_45 <<< DROP VIEW rc_controlfile_copy >>>
define drpcat_46 <<< DROP VIEW rc_backup_redolog >>>
define drpcat_47 <<< DROP VIEW rc_backup_corruption >>>
define drpcat_48 <<< DROP VIEW rc_copy_corruption >>>
define drpcat_49 <<< DROP VIEW rc_offline_range >>>
define drpcat_50 <<< DROP VIEW rc_stored_script >>>
define drpcat_51 <<< DROP VIEW rc_stored_script_line >>>
define drpcat_52 <<< DROP VIEW rc_proxy_datafile >>>
define drpcat_53 <<< DROP VIEW rc_proxy_controlfile >>>
define drpcat_54 <<< DROP VIEW rc_rman_configuration >>>
define drpcat_55 <<< DROP PACKAGE dbms_rcvcat >>>
define drpcat_56 <<< DROP PACKAGE dbms_rcvman >>>
define drpcat_57 <<< DROP TABLE config >>>
define drpcat_58 <<< DROP TABLE sfile >>>
define drpcat_59 <<< DROP VIEW rc_stored_file >>>
define drpcat_60 <<< DROP VIEW rc_database_block_corruption >>>
define drpcat_61 <<< DROP TABLE bsf >>>
define drpcat_62 <<< DROP VIEW rc_backup_spfile >>>
define drpcat_63 <<< DROP TABLE xal cascade constraints >>>
define drpcat_64 <<< DROP VIEW rc_proxy_archivedlog >>>
define drpcat_65 <<< DROP VIEW rc_backup_files >>>
define drpcat_66 <<< DROP TABLE rsr cascade constraints >>>
define drpcat_67 <<< DROP VIEW RC_RMAN_STATUS cascade constraints >>>
define drpcat_68 <<< DROP TABLE fb >>>
define drpcat_69 <<< DROP TABLE tf cascade constraints >>>
define drpcat_70 <<< DROP TABLE site_tfatt cascade constraints >>>
define drpcat_71 <<< DROP VIEW rc_tempfile >>>
define drop_dfatt <<< DROP TABLE dfatt cascade constraints >>>
define drop_tfatt <<< DROP TABLE tfatt cascade constraints >>>
define drop_vpc_users <<< DROP TABLE vpc_users >>>
define drop_vpc_databases <<< DROP TABLE vpc_databases >>>
define drop_cfs <<< DROP TABLE cfs >>>
define drop_node <<< DROP TABLE NODE cascade constraints >>>
define drop_tempres <<<
declare
  tempres_present number;
  type cur_typ is ref cursor;
  tempres_c cur_typ;
  tempres_q varchar2(256) := 'select name, data_type from tempres';
  name varchar2(256);
  data_type varchar2(256);
begin
  select count(*) into tempres_present from user_tab_columns
   where table_name = 'TEMPRES';
  if (tempres_present = 0) then
    return;
  end if;
  open tempres_c for tempres_q;
  loop
    fetch tempres_c into name, data_type;
    exit when tempres_c%notfound;
    begin
      if data_type = 'TABLE' then
        execute immediate 'drop table ' || name;
      elsif data_type = 'DBLINK' then
        execute immediate 'drop database link ' || name;
      end if;
    exception
      when others then null;
    end;
  end loop;
  execute immediate 'drop table tempres';
end; >>>
define drop_grsp <<< DROP TABLE grsp cascade constraints >>>
define drop_nrsp <<< DROP TABLE nrsp >>>
define drop_bcr <<< DROP TABLE bcr cascade constraints >>>
# end of file recover.txt