Thursday, August 11, 2005

Archive_Schema

This procedure (which by the way can EASILY be converted to a script) is to archive a set of selected schemas. To do this, the user you log in as (you shouldn't use SYS if you can avoid it... similar principle as root in the *NIX world) has to have the SELECT_CATALOG_ROLE granted. Note the AUTHID CURRENT_USER in the CREATE OR REPLACE line. This is also part of the security features for this script to allow viewing the DDL of other schema objects provided by Oracle and PL/SQL.

CREATE OR REPLACE PROCEDURE Archive_Schema( fileDirectory IN VARCHAR2, schemaList IN VARCHAR2, delimiter IN VARCHAR2 DEFAULT ',' ) AUTHID CURRENT_USER IS
-- Archive the DDL of every object owned by the schemas in schemaList (a
-- delimiter-separated list) into one timestamped .sql file under
-- fileDirectory (a UTL_FILE-accessible directory).  The invoking user
-- needs SELECT_CATALOG_ROLE; AUTHID CURRENT_USER makes DBMS_METADATA and
-- ALL_OBJECTS honor the invoker's privileges rather than the definer's.

    mCLOB        CLOB;                 -- DDL text from DBMS_METADATA.GET_DDL
    mOutputFile  UTL_FILE.FILE_TYPE;
    mOwner       VARCHAR2(30);         -- owner currently being written (banner state)
    mObjectType  VARCHAR2(18);         -- object type currently being written
    mDDLCursorID INTEGER;              -- DBMS_SQL cursor used by GetDBInfo
    mReturnValue INTEGER;

    -- Instance metadata captured for the file header.
    mDBHostName       VARCHAR2(64);
    mDBInstanceName   VARCHAR2(16);
    mDBInstanceNumber NUMBER;
    mDBVersion        VARCHAR2(17);
    mDBStartupTime    DATE;

    -- Chunked CLOB output: a single UTL_FILE.PUT (and a DBMS_LOB.SUBSTR
    -- into VARCHAR2) is capped at 32767 characters, so large DDL is
    -- written in pieces instead of one call.
    mCLOBLength  INTEGER;
    mChunkOffset INTEGER;
    mChunkSize   CONSTANT INTEGER := 32000;

    mTypeSchemaArchive typeSchemaArchive := typeSchemaArchive();

    -- Fetch host/instance/version/startup time from v$instance for the
    -- file header.
    PROCEDURE GetDBInfo IS
    BEGIN
        DBMS_SQL.PARSE( mDDLCursorID, 'SELECT instance_number, instance_name, host_name, version, startup_time FROM v$instance', DBMS_SQL.NATIVE );

        DBMS_SQL.DEFINE_COLUMN( mDDLCursorID, 1, mDBInstanceNumber );
        DBMS_SQL.DEFINE_COLUMN( mDDLCursorID, 2, mDBInstanceName, 16 );
        DBMS_SQL.DEFINE_COLUMN( mDDLCursorID, 3, mDBHostName, 64 );
        DBMS_SQL.DEFINE_COLUMN( mDDLCursorID, 4, mDBVersion, 17 );
        DBMS_SQL.DEFINE_COLUMN( mDDLCursorID, 5, mDBStartupTime );

        mReturnValue := DBMS_SQL.EXECUTE_AND_FETCH( mDDLCursorID );

        DBMS_SQL.COLUMN_VALUE( mDDLCursorID, 1, mDBInstanceNumber );
        DBMS_SQL.COLUMN_VALUE( mDDLCursorID, 2, mDBInstanceName );
        DBMS_SQL.COLUMN_VALUE( mDDLCursorID, 3, mDBHostName );
        DBMS_SQL.COLUMN_VALUE( mDDLCursorID, 4, mDBVersion );
        DBMS_SQL.COLUMN_VALUE( mDDLCursorID, 5, mDBStartupTime );
    END GetDBInfo;

    -- Open <instance>_<schemas>_<timestamp>.sql for writing with the
    -- maximum UTL_FILE line size (32767).
    PROCEDURE OpenOutputFile( fileDirectory IN VARCHAR2, schemaList IN VARCHAR2, delimiter IN VARCHAR2 DEFAULT ',' ) IS
        filename VARCHAR2(255);
    BEGIN
        filename := mDBInstanceName || '_' || REPLACE( schemaList, delimiter, '_' ) || '_' || TO_CHAR( SYSDATE, 'yyyymmddhh24miss' ) || '.sql';
        mOutputFile := UTL_FILE.FOPEN( fileDirectory, filename, 'w', 32767 );
    END OpenOutputFile;

    -- Split schemaList on delimiter into the mTypeSchemaArchive collection.
    PROCEDURE ParseSchemaList( schemaList IN VARCHAR2, delimiter IN VARCHAR2 DEFAULT ',' ) IS
        offset        NUMBER DEFAULT 1;                    -- current parse position
        overallLength NUMBER DEFAULT LENGTH( schemaList ); -- length of the input string
        stringLength  NUMBER;                              -- position just past the current chunk
    BEGIN
        -- BUG FIX: the original condition was "offset < overallLength",
        -- which silently dropped a trailing single-character schema name
        -- (e.g. the "C" in 'AB,C' was never added to the collection).
        WHILE offset <= overallLength LOOP
            stringLength := INSTR( schemaList, delimiter, offset );
            IF stringLength = 0 THEN
                -- No more delimiters: this is the last chunk.
                stringLength := overallLength + 1;
            END IF;

            mTypeSchemaArchive.EXTEND;
            mTypeSchemaArchive( mTypeSchemaArchive.COUNT ) := SUBSTR( schemaList, offset, stringLength - offset );

            offset := stringLength + 1;
        END LOOP;
    END ParseSchemaList;

    -- Write a banner announcing a new schema section.
    PROCEDURE PrintOwnerHeader( p_Owner IN VARCHAR2 ) IS
    BEGIN
        UTL_FILE.NEW_LINE( mOutputFile, 2 );
        UTL_FILE.PUT_LINE( mOutputFile, '/*==================================' );
        UTL_FILE.PUTF( mOutputFile, 'SCHEMA: %s', p_Owner );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUT_LINE( mOutputFile, '==================================*/' );
    END PrintOwnerHeader;

    -- Write a banner announcing a new object-type section within a schema.
    PROCEDURE PrintObjectHeader( p_owner IN VARCHAR2, p_objectType IN VARCHAR2 ) IS
    BEGIN
        UTL_FILE.NEW_LINE( mOutputFile, 2 );
        UTL_FILE.PUT_LINE( mOutputFile, '/*==================================' );
        UTL_FILE.PUTF( mOutputFile, '%s SCHEMA OBJECT: %s', p_owner, p_objectType );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUT_LINE( mOutputFile, '==================================*/' );
    END PrintObjectHeader;

    -- Write the file-level header: timestamp, instance metadata and the
    -- list of schemas being archived.
    PROCEDURE PrintFileHeader IS
    BEGIN
        UTL_FILE.PUT_LINE( mOutputFile, '/*==================================' );
        UTL_FILE.PUTF( mOutputFile, 'Select database schemas as of: %s', TO_CHAR( SYSDATE, 'dd-mon-yyyy hh24:mi:ss' ) );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUTF( mOutputFile, 'HOST NAME: %s', mDBHostName );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUTF( mOutputFile, 'INSTANCE NAME: %s', mDBInstanceName );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUTF( mOutputFile, 'INSTANCE NUMBER: %s', TO_CHAR( mDBInstanceNumber ) );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUTF( mOutputFile, 'DB VERSION: %s', mDBVersion );
        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUTF( mOutputFile, 'DB STARTUP TIME: %s', TO_CHAR( mDBStartupTime, 'dd-mon-yyyy hh24:mi:ss' ) );
        UTL_FILE.NEW_LINE( mOutputFile, 2 );

        UTL_FILE.PUT_LINE( mOutputFile, 'Selected Schemas:' );

        FOR schemas IN ( SELECT COLUMN_VALUE FROM TABLE(CAST(mTypeSchemaArchive AS typeSchemaArchive)) ) LOOP
            UTL_FILE.PUTF( mOutputFile, ' %s', schemas.COLUMN_VALUE );
            UTL_FILE.NEW_LINE( mOutputFile, 1 );
        END LOOP;

        UTL_FILE.NEW_LINE( mOutputFile, 1 );
        UTL_FILE.PUT_LINE( mOutputFile, '==================================*/' );
        UTL_FILE.NEW_LINE( mOutputFile, 2 );
    END PrintFileHeader;

BEGIN
    mDDLCursorID := DBMS_SQL.OPEN_CURSOR;

    GetDBInfo;

    OpenOutputFile( fileDirectory, schemaList, delimiter );

    ParseSchemaList( schemaList, delimiter );

    PrintFileHeader;

    -- Suppress STORAGE clauses so the generated DDL is portable.
    DBMS_METADATA.SET_TRANSFORM_PARAM( DBMS_METADATA.SESSION_TRANSFORM, 'STORAGE', FALSE );

    FOR schemas IN ( SELECT owner, object_type, object_name FROM ALL_OBJECTS WHERE owner IN ( SELECT COLUMN_VALUE FROM TABLE(CAST(mTypeSchemaArchive AS typeSchemaArchive)) ) ORDER BY owner, object_type, object_name ) LOOP
        BEGIN
            -- Emit section banners whenever the owner or object type changes.
            IF mOwner IS NULL THEN
                mOwner := schemas.owner;
                mObjectType := schemas.object_type;

                PrintOwnerHeader( mOwner );
                PrintObjectHeader( mOwner, mObjectType );
            ELSIF mOwner <> schemas.owner THEN
                mOwner := schemas.owner;
                mObjectType := schemas.object_type;

                PrintOwnerHeader( mOwner );
                PrintObjectHeader( mOwner, mObjectType );
            ELSIF mObjectType <> schemas.object_type THEN
                mObjectType := schemas.object_type;

                PrintObjectHeader( mOwner, mObjectType );
            END IF;

            mCLOB := DBMS_METADATA.GET_DDL( mObjectType, schemas.object_name, mOwner );

            -- BUG FIX: write the CLOB in chunks; the original single
            -- PUT/SUBSTR call failed for DDL longer than 32767 characters.
            mCLOBLength  := DBMS_LOB.GETLENGTH( mCLOB );
            mChunkOffset := 1;
            WHILE mChunkOffset <= mCLOBLength LOOP
                UTL_FILE.PUT( mOutputFile, DBMS_LOB.SUBSTR( mCLOB, mChunkSize, mChunkOffset ) );
                UTL_FILE.FFLUSH( mOutputFile );
                mChunkOffset := mChunkOffset + mChunkSize;
            END LOOP;
        EXCEPTION
            -- Object types DBMS_METADATA cannot render (and any write
            -- error) are logged into the output file; processing continues
            -- with the next object.
            WHEN OTHERS THEN
                UTL_FILE.PUTF( mOutputFile, 'Error on %s: %s.%s: %s', mObjectType, mOwner, schemas.object_name, SQLERRM );
                UTL_FILE.NEW_LINE( mOutputFile, 1 );
                UTL_FILE.FFLUSH( mOutputFile );
        END;
    END LOOP;

    UTL_FILE.FCLOSE( mOutputFile );

    -- Restore all session transform parameters to their defaults.
    DBMS_METADATA.SET_TRANSFORM_PARAM( DBMS_METADATA.SESSION_TRANSFORM, 'DEFAULT', TRUE );

    DBMS_SQL.CLOSE_CURSOR( mDDLCursorID );
END Archive_Schema;
/

Here's a sampling of the output from this:
/*==================================
Select database schemas as of: 11-aug-2005 15:52:32
HOST NAME: gilligan
INSTANCE NAME: gilligan
INSTANCE NUMBER: 1
DB VERSION: 9.2.0.5.0
DB STARTUP TIME: 10-aug-2005 14:33:41

Selected Schemas:
PSG
PERFSTAT

==================================*/




/*==================================
SCHEMA: PERFSTAT
==================================*/


/*==================================
PERFSTAT SCHEMA OBJECT: INDEX
==================================*/

CREATE UNIQUE INDEX "PERFSTAT"."STATS$BG_EVENT_SUMMARY_PK" ON "PERFSTAT"."STATS$BG_EVENT_SUMMARY" ("SNAP_ID", "DBID", "INSTANCE_NUMBER", "EVENT")
PCTFREE 10 INITRANS 2 MAXTRANS 255
TABLESPACE "TOOLS"

CREATE UNIQUE INDEX "PERFSTAT"."STATS$BUFFER_POOL_STATS_PK" ON "PERFSTAT"."STATS$BUFFER_POOL_STATISTICS" ("SNAP_ID", "DBID", "INSTANCE_NUMBER", "ID")
PCTFREE 10 INITRANS 2 MAXTRANS 255
TABLESPACE "TOOLS"

CREATE UNIQUE INDEX "PERFSTAT"."STATS$DATABASE_INSTANCE_PK" ON "PERFSTAT"."STATS$DATABASE_INSTANCE" ("DBID", "INSTANCE_NUMBER", "STARTUP_TIME")
PCTFREE 10 INITRANS 2 MAXTRANS 255
TABLESPACE "TOOLS"

CREATE UNIQUE INDEX "PERFSTAT"."STATS$DB_CACHE_ADVICE_PK" ON "PERFSTAT"."STATS$DB_CACHE_ADVICE" ("SNAP_ID", "DBID", "INSTANCE_NUMBER", "ID", "BUFFERS_FOR_ESTIMATE")
PCTFREE 10 INITRANS 2 MAXTRANS 255
TABLESPACE "TOOLS"

CREATE UNIQUE INDEX "PERFSTAT"."STATS$DLM_MISC_PK" ON "PERFSTAT"."STATS$DLM_MISC" ("SNAP_ID", "DBID", "INSTANCE_NUMBER", "STATISTIC#")
PCTFREE 10 INITRANS 2 MAXTRANS 255
TABLESPACE "TOOLS"

Wednesday, July 27, 2005

UNDO stats watching...

After we had some UNDO woes here at work, I had to go through the ugly process of figuring out how much UNDO space we could use and how to set our UNDO_RETENTION parameter. Oh, is this a confusing and frustrating process.

Here's some statistics of our current systems without the gory detail:

17 GB of UNDO tablespace and no room to grow.
18000 for the UNDO_RETENTION parameter.
A 3K+ second MAX(V$UNDOSTAT.MAXQUERYLEN)
Data from 8 days ago (rolling) to current in V$UNDOSTAT.
We periodically get ORA-01555 (snapshot too old) and ORA-30036 (unable to extend undo segment) errors.

Here's what I have discovered so far:
  1. Never read too far in posts on the internet. Flame wars distract you too much. No matter how entertaining it is to read Howard J. Rogers argue with Joel Garry, find what you need and get out. Time's 'a wastin'.

  2. UNDO is an inexact science.

  3. Oracle has a handle on things quite well.

  4. Oracle has a funky way of figuring out how much UNDO space you will need for a given UNDO_RETENTION parameter.

  5. Everyone has interesting ways to calculate UNDO space and the "optimal" UNDO_RETENTION parameter.

From Swiss company Akadia AG, I was able to derive some interesting statistics using some of the scripts they provided.
Their formula for calculating the optimal UNDO_RETENTION is this:
OPTIMAL UNDO RETENTION = ACTUAL UNDO SIZE / ( DB_BLOCK_SIZE * UNDO_BLOCK_PER_SEC )
This script will show the actual undo size:
-- Total size (in bytes) of all datafiles belonging to ONLINE undo
-- tablespaces.
SELECT SUM(df.bytes) "UNDO_SIZE"
FROM dba_tablespaces ts
INNER JOIN v$tablespace vt
    ON vt.name = ts.tablespace_name
INNER JOIN v$datafile df
    ON df.ts# = vt.ts#
WHERE ts.contents = 'UNDO'
  AND ts.status = 'ONLINE';

In my case, this is the result:
   UNDO_SIZE
----------
1.7826E+10
(I think that equals 17GB...)

Next we get our UNDO blocks per second:
-- Peak undo consumption rate (blocks per second) observed in v$undostat.
-- (end_time - begin_time) is date arithmetic in days, so * 3600 * 24
-- converts each interval to seconds.
SELECT MAX(undoblks/((end_time-begin_time)*3600*24))
"UNDO_BLOCK_PER_SEC"
FROM v$undostat;

And again, my results:
UNDO_BLOCK_PER_SEC
------------------
651.855
And finally, the DB_BLOCK_SIZE:
-- db_block_size is reported in BYTES, not kilobytes (the 8192 shown
-- below is 8 KB), and the optimal-retention formula divides by it as
-- bytes — so the column is labeled accordingly.
SELECT TO_NUMBER(value) "DB_BLOCK_SIZE [Byte]"
FROM v$parameter
WHERE name = 'db_block_size';

And of course, my results one more time:
DB_BLOCK_SIZE [KByte]
---------------------
8192
Now using their formula, I get (I fixed the UNDO_SIZE number for this calculation):
 17825792000 / ( 651.855 * 8192 ) = 3338.17

Here's the script to do all of this at once:
-- Actual undo size, configured retention, and the retention the current
-- undo space could sustain at the peak observed consumption rate.
SELECT
    d.undo_size / (1024 * 1024) "ACTUAL UNDO SIZE [MByte]",
    SUBSTR(e.value, 1, 25) "UNDO RETENTION [Sec]",
    ROUND(d.undo_size / (TO_NUMBER(f.value) * g.undo_block_per_sec)) "OPTIMAL UNDO RETENTION [Sec]"
FROM
    -- total bytes of ONLINE undo tablespaces
    ( SELECT SUM(df.bytes) undo_size
        FROM dba_tablespaces ts
        INNER JOIN v$tablespace vt ON vt.name = ts.tablespace_name
        INNER JOIN v$datafile df ON df.ts# = vt.ts#
       WHERE ts.contents = 'UNDO'
         AND ts.status = 'ONLINE' ) d
CROSS JOIN
    -- configured undo retention (seconds)
    ( SELECT value FROM v$parameter WHERE name = 'undo_retention' ) e
CROSS JOIN
    -- block size in bytes
    ( SELECT value FROM v$parameter WHERE name = 'db_block_size' ) f
CROSS JOIN
    -- peak undo blocks consumed per second
    ( SELECT MAX(undoblks / ((end_time - begin_time) * 3600 * 24)) undo_block_per_sec
        FROM v$undostat ) g
/
And the output from this:
ACTUAL UNDO SIZE [MByte]
------------------------
17000

UNDO RETENTION [Sec]
-------------------------
18000

OPTIMAL UNDO RETENTION [Sec]
----------------------------
3338
Now, after all that, I still am nowhere. If I set the UNDO_RETENTION to 3338, we will get ORA-01555 errors. If I leave it at 18000, eventually we will get ORA-30036 as the UNDO tablespace fills up.

I would like to know how I can associate the current AND active data in V$UNDOSTAT with the current ACTIVE and UNEXPIRED data in DBA_UNDO_EXTENTS. How can I tell what is active and what is expired in V$UNDOSTAT? Or am I looking in the wrong place?

I'm giving this to Tom...

Thursday, June 02, 2005

CreateROUserLike.sql - Create read only user like

With this PL/SQL script I created, you can now create a read only version of a database user. This script will create the user with the extension "_RO" on the read/write user name. After it creates the user, it will grant EXECUTE on FUNCTION, PROCEDURE, PACKAGE and PACKAGE BODY object types and SELECT on others. Because a DATABASE LINK can't have a synonym or special grants, it recreates these in the read only user's schema.

Wednesday, May 18, 2005

Project idea: Open source performance tuning software

My idea has these features:

  • Use STATSPACK queries to gather and review statistics.

  • Store results locally for quicker review.

  • Realtime performance stats gathering.

  • More to come...



This project would be done in .NET with possible porting to MONO later on. This would mean that it would need to be limited in what it uses in the way of Microsoft based assemblies. This should be exciting plus free! Just gotta check with Oracle about the STATSPACK queries. But in thinking, this tool would rely on STATSPACK being installed, so using the queries from the report shouldn't be a problem. Plus, being OSS there shouldn't be a licensing issue.

Next question, what should I use for a local data store? DB4O is an OSS non-relational database used by numerous companies, but will it work for this application? Are there other options similar to Access or DB4O?

Wednesday, March 30, 2005

Tuning Log Switches

Another good Redo Log article:
Tuning Log Switches

Oracle's Redo Log

An interesting article regarding Redo Logs:

Oracle's Redo Log

The redo log makes it possible to replay SQL statements. Before Oracle changes data in a datafile it writes these changes to the redo log. If something happens to one of the datafiles, a backed-up datafile can be restored and the redo that was written since can be replayed, which brings the datafile to the state it had before it became unavailable. The same technique is also used in a Data Guard environment (standby databases): one database (the primary database) records all changes and sends them to the standby databases. These standby databases in turn apply (replay) the arrived redo, which keeps them synchronized with the primary database.

Monday, February 14, 2005

Automated alert log archive

Here's a set of scripts (package, PL/SQL and Korn) that, when set in place and activated with a Unix cron job, will archive the alert log in a gzipped file. (Note: the formatting was mangled by Blogger — the pipe characters were stripped from the code as originally posted.)

First, put this in place as a package:

-- Package for managing the Oracle alert log through the BDUMP_DIR
-- directory object: read today's entries, archive the live log to a
-- dated copy, and list or remove archived copies.
CREATE OR REPLACE PACKAGE alert_log AS
-- One-time setup: create directory object BDUMP_DIR from background_dump_dest.
PROCEDURE bdump_dir_setup;
-- Append candidate alert log file names for the current SID value.
PROCEDURE build_possible_alert_log_names;
-- Build the full candidate-name list from db_name, instance_name and global_name.
PROCEDURE build_alertlog_name;
-- Walk the candidate list and keep the name of the file that actually exists.
PROCEDURE check_alertlog_name;
-- Print today's entries from the live log (offset 0) or a dated copy.
PROCEDURE read(in_date_offset IN NUMBER);
-- Rename the live log to YYYYMMDD_<name> and start a fresh, stamped log.
PROCEDURE archive;
-- Delete dated copies in the range [offset - number_of_days .. offset].
PROCEDURE remove(in_date_offset IN NUMBER, number_of_days IN NUMBER);
-- Print name and size of the live log and dated copies up to the offset.
PROCEDURE list(in_date_offset IN NUMBER);
-- Create a fresh alert log containing an initialization stamp line.
PROCEDURE INIT_ALERT_LOG;
END alert_log;
/

CREATE OR REPLACE PACKAGE BODY alert_log AS
-- Implementation of the alert_log package.
-- BUG FIX: as originally posted, the blog formatting stripped every '||'
-- concatenation operator; they are restored throughout this body.

    bdump_dir     VARCHAR2(500);  -- background_dump_dest path
    alertlist     VARCHAR2(4000); -- comma-separated candidate alert log names
    SID           VARCHAR2(100);  -- db_name / instance_name / global_name candidate
    comma         VARCHAR2(1);    -- list separator (NULL until first entry added)
    alertfile     VARCHAR2(255);  -- alert log file name selected by check_alertlog_name
    itexists      NUMBER;
    fileat        BFILE;
    pointa        INTEGER;
    pointb        INTEGER;
    v_length      INTEGER;
    r_char10      RAW(4);         -- newline as RAW, for DBMS_LOB.INSTR searches
    char10        VARCHAR2(1) := CHR(10);
    startdate     VARCHAR2(100);
    r_startdate   RAW(100);
    buffer        VARCHAR2(800);
    buffer2       VARCHAR2(800);
    vexists       BOOLEAN;
    vfile_length  NUMBER;
    vblocksize    NUMBER;
    date_offset   NUMBER;
    date_file     VARCHAR2(255);
    cursor_id     INTEGER;
    return_val    INTEGER;
    file_type     UTL_FILE.FILE_TYPE;
    sql_statement VARCHAR2(500);

    -- Look up background_dump_dest and create the BDUMP_DIR directory
    -- object pointing at it.  One-time setup.
    PROCEDURE bdump_dir_setup IS
    BEGIN
        EXECUTE IMMEDIATE 'SELECT value '
                       || ' FROM v$parameter '
                       || ' WHERE name = ''background_dump_dest'''
        INTO bdump_dir;

        EXECUTE IMMEDIATE 'CREATE DIRECTORY BDUMP_DIR '
                       || ' AS '''
                       || bdump_dir
                       || '''';
    END bdump_dir_setup;

    -- Build the candidate alert log name list from db_name,
    -- instance_name and the domain-stripped global_name.
    PROCEDURE build_alertlog_name IS
    BEGIN
        alertlist := NULL;
        comma := NULL;

        EXECUTE IMMEDIATE 'SELECT value '
                       || ' FROM v$parameter '
                       || ' WHERE name = ''db_name'''
        INTO SID;

        build_possible_alert_log_names;

        EXECUTE IMMEDIATE 'SELECT value '
                       || ' FROM v$parameter '
                       || ' WHERE name = ''instance_name'''
        INTO SID;

        build_possible_alert_log_names;

        EXECUTE IMMEDIATE 'SELECT substr(global_name,1,'
                       || ' instr(global_name,''.'',-1)-1) '
                       || ' FROM global_name'
        INTO SID;

        build_possible_alert_log_names;
        r_char10 := UTL_RAW.cast_to_raw(char10);
    END build_alertlog_name;

    -- Walk the candidate list and keep the first name that exists in
    -- BDUMP_DIR.
    PROCEDURE check_alertlog_name IS
    BEGIN
        pointa := 1;

        LOOP
            pointb := INSTR(alertlist, ',', pointa, 1);

            IF pointb = 0 THEN
                -- Last candidate (the list has no trailing comma).
                alertfile := SUBSTR(alertlist, pointa);
            ELSE
                v_length := pointb - pointa;
                alertfile := SUBSTR(alertlist, pointa, v_length);
            END IF;

            itexists :=
                DBMS_LOB.fileexists(BFILENAME('BDUMP_DIR', alertfile));

            -- BUG FIX: also exit when the list is exhausted; the original
            -- looped forever if none of the candidate files existed.
            IF itexists = 1 OR pointb = 0 THEN
                pointb := 0;
                EXIT;
            END IF;

            pointa := pointb + 1;
        END LOOP;
    END check_alertlog_name;

    -- Append the lowercase Unix-style and uppercase VMS-style names for
    -- the current SID value to the candidate list.
    PROCEDURE build_possible_alert_log_names IS
    BEGIN
        alertlist := alertlist || comma || 'alert_'
                     || LOWER(SID) || '.log';
        comma := ',';
        alertlist := alertlist || comma || UPPER(SID)
                     || 'ALRT.LOG';
    END build_possible_alert_log_names;

    -- Print (via DBMS_OUTPUT) today's entries from the live alert log
    -- (offset 0) or from the dated copy SYSDATE - in_date_offset.
    PROCEDURE READ(in_date_offset IN NUMBER) IS
    BEGIN
        DBMS_OUTPUT.ENABLE(1000000);
        build_alertlog_name;
        check_alertlog_name;
        date_offset := in_date_offset;

        IF date_offset = 0 THEN
            date_file := alertfile;
        ELSE
            date_file :=
                TO_CHAR(SYSDATE - date_offset, 'YYYYMMDD')
                || '_'
                || alertfile;
        END IF;

        UTL_FILE.fgetattr('BDUMP_DIR',
                          date_file,
                          vexists,
                          vfile_length,
                          vblocksize);

        IF vexists THEN
            fileat := BFILENAME('BDUMP_DIR', date_file);
            DBMS_LOB.fileopen(fileat, DBMS_LOB.file_readonly);

            -- Look for today's first timestamp, e.g. 'Thu Aug 11'.
            startdate := TO_CHAR(TRUNC(SYSDATE), 'Dy Mon DD');
            r_startdate := UTL_RAW.cast_to_raw(startdate);
            pointa := DBMS_LOB.INSTR(fileat, r_startdate, 1, 1);

            IF pointa = 0 THEN
                -- Single-digit days are space-padded in the alert log
                -- ('Thu Aug  4'), so rebuild the search string that way
                -- and retry.
                EXECUTE IMMEDIATE 'SELECT TO_CHAR(TRUNC(SYSDATE),''Dy Mon '') '
                               || ' || DECODE(SUBSTR(TO_CHAR(TRUNC(SYSDATE),''DD''),1,1),0,'' '') '
                               || ' || SUBSTR(TO_CHAR(TRUNC(SYSDATE),''DD''),2,1) '
                               || ' FROM dual'
                INTO startdate;

                r_startdate := UTL_RAW.cast_to_raw(startdate);
                pointa := DBMS_LOB.INSTR(fileat, r_startdate, 1, 1);
            END IF;

            -- Emit each newline-terminated line from the first match to
            -- the end of the file.
            IF pointa != 0 THEN
                LOOP
                    pointb := DBMS_LOB.INSTR(fileat, r_char10, pointa, 1);
                    EXIT WHEN pointb = 0;
                    v_length := pointb - pointa;
                    -- DBMS_LOB.SUBSTR on a BFILE returns RAW; the implicit
                    -- RAW->VARCHAR2 conversion into buffer is undone by
                    -- UTL_RAW.cast_to_varchar2 below.
                    buffer := DBMS_LOB.SUBSTR(fileat, v_length, pointa);
                    buffer2 := UTL_RAW.cast_to_varchar2(buffer);
                    DBMS_OUTPUT.put_line(buffer2);
                    pointa := pointb + 1;
                END LOOP;
            END IF;

            DBMS_LOB.fileclose(fileat);
        END IF;
    END READ;

    -- Rename the live alert log to YYYYMMDD_<name>, start a fresh one
    -- and report the archived file's full path (parsed by the shell
    -- wrapper that gzips it).
    PROCEDURE ARCHIVE IS
    BEGIN
        DBMS_OUTPUT.ENABLE(1000000);
        build_alertlog_name;
        check_alertlog_name;
        UTL_FILE.frename('BDUMP_DIR',
                         alertfile,
                         'BDUMP_DIR',
                         TO_CHAR(SYSDATE, 'YYYYMMDD')
                         || '_' || alertfile);
        SELECT directory_path
          INTO bdump_dir
          FROM all_directories
         WHERE directory_name = 'BDUMP_DIR';

        init_alert_log;

        DBMS_OUTPUT.put_line('Archived to: '
                             || bdump_dir || '/'
                             || TO_CHAR(SYSDATE, 'YYYYMMDD')
                             || '_' || alertfile);
    END ARCHIVE;

    -- Delete dated copies for offsets
    -- [in_date_offset - number_of_days .. in_date_offset] days ago.
    PROCEDURE remove(in_date_offset IN NUMBER,
                     number_of_days IN NUMBER) IS
    BEGIN
        build_alertlog_name;
        check_alertlog_name;

        FOR date_offset IN (in_date_offset - number_of_days)
                        .. in_date_offset LOOP
            date_file :=
                TO_CHAR(SYSDATE - date_offset, 'YYYYMMDD') || '_'
                || alertfile;
            UTL_FILE.fgetattr('BDUMP_DIR',
                              date_file,
                              vexists,
                              vfile_length,
                              vblocksize);

            IF vexists THEN
                UTL_FILE.fremove('BDUMP_DIR', date_file);
                DBMS_OUTPUT.put_line(date_file || ' - REMOVED');
            END IF;
        END LOOP;
    END remove;

    -- Print name and size of the live log (offset 0) and of each dated
    -- copy up to in_date_offset days ago.
    PROCEDURE LIST(in_date_offset IN NUMBER) IS
    BEGIN
        build_alertlog_name;
        check_alertlog_name;

        FOR date_offset IN 0 .. in_date_offset LOOP
            IF date_offset = 0 THEN
                -- The live (un-dated) alert log.
                date_file := alertfile;
                UTL_FILE.fgetattr('BDUMP_DIR',
                                  date_file,
                                  vexists,
                                  vfile_length,
                                  vblocksize);

                IF vexists THEN
                    DBMS_OUTPUT.put_line('OffSet : '
                                         || date_offset
                                         || ', '
                                         || date_file
                                         || ' '
                                         || vfile_length);
                END IF;
            END IF;

            date_file :=
                TO_CHAR(SYSDATE - date_offset, 'YYYYMMDD') || '_'
                || alertfile;
            UTL_FILE.fgetattr('BDUMP_DIR',
                              date_file,
                              vexists,
                              vfile_length,
                              vblocksize);

            IF vexists THEN
                DBMS_OUTPUT.put_line('OffSet : '
                                     || date_offset
                                     || ', '
                                     || date_file
                                     || ' '
                                     || vfile_length);
            END IF;
        END LOOP;
    END LIST;

    -- Create a fresh alert log containing a single stamp line, so DBAs
    -- can tell the log was rolled by this package.
    PROCEDURE INIT_ALERT_LOG IS
    BEGIN
        build_alertlog_name;

        -- Open (create) the alert log file for write access.
        file_type := UTL_FILE.fopen('BDUMP_DIR', alertfile, 'W');

        -- Write the custom message to the alert log file.
        UTL_FILE.put_line(file_type,
                          'Alert log initialized by '
                          || 'HP_ALERT_LOG.INIT_ALERT_LOG on '
                          || SYSDATE);

        -- Close the alert log file.
        UTL_FILE.fclose(file_type);
    END INIT_ALERT_LOG;

BEGIN
    -- Package initialization: give DBMS_OUTPUT a large buffer.
    DBMS_OUTPUT.ENABLE(1000000);
END alert_log;
/
Now, save the following as a PL/SQL script:
-- SQL*Plus driver script: archives the alert log and echoes the
-- package's "Archived to: ..." line so a calling shell script can
-- capture the archived file's path.
set serveroutput on;
exec alert_log.archive;
exit;


Finally, save this to a Korn shell script file executable by oracle Unix user only:

#!/bin/ksh
# For every instance listed in /etc/oratab whose third field is 'Y',
# call alert_log.archive via SQL*Plus, then gzip the archived alert log
# whose path is printed on the "Archived to:" line.  Run from cron as
# the oracle user.
# BUG FIX: the stripped pipe (|) characters are restored below.
# NOTE(review): credentials are hardcoded; this file must be readable by
# the oracle user only — consider OS authentication or a wallet instead.
USER=psg
PASS=psg4admin
ORATAB=/etc/oratab
# Set path if path not set (if called from /etc/rc)
case $PATH in
"") PATH=/bin:/usr/bin:/etc
    export PATH
    ;;
esac
SAVE_PATH=$PATH
# Save LD_LIBRARY_PATH
SAVE_LLP=$LD_LIBRARY_PATH
#
# Loop for every entry in the oratab file and try to archive that
# ORACLE's alert log.
#
cat $ORATAB | while read LINE
do
    case $LINE in
    \#*) ;;   # comment line in oratab
    *)
        # Proceed only if third field is 'Y'.
        if [ "$(echo $LINE | awk -F: '{print $3}' -)" = "Y" ] ; then
            ORACLE_SID=$(echo $LINE | awk -F: '{print $1}' -)
            if [ "$ORACLE_SID" = '*' ] ; then
                ORACLE_SID=""
            fi
            # Called programs use same database ID
            export ORACLE_SID
            ORACLE_HOME=$(echo $LINE | awk -F: '{print $2}' -)
            # Called scripts use same home directory
            export ORACLE_HOME
            # Put $ORACLE_HOME/bin into PATH and export.
            PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/etc:$SAVE_PATH
            export PATH
            # add for bug 652997
            LD_LIBRARY_PATH=${SAVE_LLP}:${ORACLE_HOME}/lib
            export LD_LIBRARY_PATH
            PFILE=${ORACLE_HOME}/dbs/init${ORACLE_SID}.ora
            # Extract the archived file path from the "Archived to:" line
            # of the SQL*Plus output, then compress that file.
            BDUMP_DIR=$(sqlplus $USER/$PASS @runAlertLogRead | grep Archive | cut -f3 -d' ')
            gzip $BDUMP_DIR
            PATH=$SAVE_PATH
        fi
        ;;
    esac
done



As the final step, add the Korn shell script to a cron job. This can then be run at regular intervals.

Wednesday, January 12, 2005

exp/imp Korn Shell scripts

Today, I am releasing two scripts that will allow the people here to export a schema and re-import it into another database. Both are Korn Shell scripts so obviously they are for Unix or variants.
=======================================================================
export.sh -
Usage: export.sh -p [dba_user_password] -d [tns_entry_of_db_2_exp] -f [export_file_name.dmp] -s [schema_name schema_name,schema_name<,...>]

These parameters need to be in the order as shown. The -s option can either be a space or comma (no spaces) list.
========================================================================
import.sh -
Usage: import.sh -p [dba_user_password] -d [tns_entry_of_db_2_exp] -f [export_file_name.dmp]
========================================================================

In both scripts the -f option can have the full path to another directory, but will automatically change the path to the predefined variable using the filename supplied.

In both scripts if any option is omitted, the user will be prompted for the value.

Here are the scripts!:

export.sh
=======
#!/usr/bin/ksh
# export.sh - Korn Shell script to export (an) Oracle schema(s).
#
# Exports the given schema(s) with exp through a named pipe, compressing
# the output on the fly into ${copy_dir}<dump_name>.Z.
# Usage: export.sh -p password -d tns_entry -f dump_file.dmp -s schema [...]
# Any missing option is prompted for.

# January 6, 2005
# Author - John Baughman
# Hardcoded values: copy_dir, pipefile, dba
dba=PSG

copy_dir=/dbcopy/
# Check the dump directory exists
if [[ ! -d "${copy_dir}" ]]; then
    echo
    echo "${copy_dir} does not exist !"
    echo
    exit 1
fi

pipefile=~/backup/backup_pipe
# Check we have created the named pipe file, or else there is no point
# continuing.  (BUG FIX: mknod takes the name first, then the type.)
if [[ ! -p ${pipefile} ]]; then
    echo
    echo "Create the named pipe file ${pipefile} using "
    echo " $ mknod ${pipefile} p"
    echo
    exit 1
fi

# Loop through the command line parameters
# -p - Currently the PSG user password.
# -d - The Oracle SID of the database to export. This matches the TNS entry.
# -f - The export dump file name (any directory path is stripped).
# -s - The schema(s) to export; must be last, consumes remaining args.
# If either/all of these don't exist, prompt for them.
while getopts ":p:d:f:s" opt; do
    case $opt in
    p ) psg_password=$OPTARG ;;
    d ) sid=$OPTARG ;;
    f ) raw_name=${OPTARG##/*/} ;;
    s ) shift $((OPTIND))
        raw_schema=$@ ;;
    \? ) print "usage: export.sh -p password -d tns_entry -f dump_file.dmp -s schema[...]"
        exit 1 ;;
    esac
done

# Get the missing ${dba} user password
while [[ -z ${psg_password} ]]; do
    read psg_password?"Enter ${dba} Password: "
done

# Get the missing SID
while [[ -z ${sid} ]]; do
    read sid?"Enter Oracle SID: "
done

# Get the missing "raw" name
while [[ -z ${raw_name} ]]; do
    read raw_name?"Enter the export dump file name: "
done

while [[ -z ${raw_schema} ]]; do
    print "Enter the schema(s)"
    read raw_schema?"(if more than one schema, they can either be space or comma delimited): "
done

# Fix up the raw schema list: uppercase it and turn any space-separated
# names into a single comma-separated list.
schema_list=""
raw_schema=$(print $raw_schema | tr "[a-z]" "[A-Z]")
for name in $raw_schema; do
    if [[ -z $schema_list ]]; then
        schema_list="${name}"
    else
        schema_list="${schema_list},${name}"
    fi
done
raw_schema=${schema_list}
schema_list="(${schema_list})"

# Fix up the export file name here...
export_file=${copy_dir}${raw_name}.Z

# Let's go!!!
print ""
print "*******************************************************"
print "* Exporting ${raw_schema} schema(s) at `date`"
print "*******************************************************"
print

# Background compress reads the pipe while exp writes into it.
compress < ${pipefile} > ${export_file} &
exp ${dba}/${psg_password}@${sid} direct=y log=${copy_dir}${raw_name}.exp.log statistics=none buffer=1000000 feedback=10000 file=${pipefile} owner=${schema_list}

print
print "*******************************************************"
print "* Export Completed at `date`"
print "*******************************************************"
print

exit 0


import.sh
=======
#!/usr/bin/ksh
# import.sh - Korn Shell script to import (an) Oracle schema(s).
#
# Uncompresses ${copy_dir}<dump_name>.Z through a named pipe while imp
# reads the pipe, mirroring export.sh's pipeline in reverse.
# Usage: import.sh -p password -d tns_entry -f dump_file.dmp
# Any missing option is prompted for.

# January 6, 2005
# Author - John Baughman
# Hardcoded values: copy_dir, pipefile, dba

dba=PSG

copy_dir=/dbcopy/
# Check the dump directory exists
if [[ ! -d "${copy_dir}" ]]; then
    echo
    echo "${copy_dir} does not exist !"
    echo
    exit 1
fi

pipefile=~/backup/backup_pipe
# Check we have created the named pipe file, or else there is no point
# continuing.  (BUG FIX: mknod takes the name first, then the type.)
if [[ ! -p ${pipefile} ]]; then
    echo
    echo "Create the named pipe file ${pipefile} using "
    echo " $ mknod ${pipefile} p"
    echo
    exit 1
fi

# Loop through the command line parameters
# -p - Currently the PSG user password.
# -d - The Oracle SID of the database to export. This matches the TNS entry.
# -f - The export file to import (any directory path is stripped).
# If either/all of these don't exist, prompt for them.
while getopts ":p:d:f:" opt; do
    case $opt in
    p ) psg_password=$OPTARG ;;
    d ) sid=$OPTARG ;;
    f ) raw_name=${OPTARG##/*/} ;;
    \? ) print "usage: import.sh -p password -d tns_entry -f dump_file.dmp"
        exit 1 ;;
    esac
done

# Get the missing ${dba} user password
while [[ -z ${psg_password} ]]; do
    read psg_password?"Enter ${dba} Password: "
done

# Get the missing SID
while [[ -z ${sid} ]]; do
    read sid?"Enter Oracle SID: "
done

# Get the missing "raw" name
while [[ -z ${raw_name} ]]; do
    read raw_name?"Enter the export dump file name: "
done

# Fix up the export file name here...
export_file=${copy_dir}${raw_name}.Z

# Let's go!!!
print
print "*******************************************************"
print "* Importing ${export_file} at `date`"
print "*******************************************************"
print

# Background uncompress feeds the pipe while imp reads from it.
# (BUG FIX: the original line had the '&' stripped, fusing the two
# commands into one broken statement.)
uncompress < ${export_file} > ${pipefile} &
imp ${dba}/${psg_password}@${sid} log=${copy_dir}${raw_name}.imp.log full=y statistics=none buffer=1000000 feedback=10000 file=${pipefile} ignore=y

print
print "*******************************************************"
print "* Import Completed at `date`"
print "*******************************************************"
print

exit 0

Sunday, January 09, 2005

Yes, the name says it all...

According to the Oracle reference: ORA-06950 = No Error

What a fantastic error message. Makes absolutely NO sense what so ever. I mean, why bother setting an error if there is none?

Hopefully, future posts will be more informative. Here's where I'll dump my Oracle knowledge for all to see (if you really care).

-John