All posts by ghg

Materialized View with refresh fast Example

First, create a table as the basis for the materialized view:


create table matviewtab as
with resNmb(iNmb) as
(select 1 as iNmb from dual
 union all
 select iNmb + 1 from resNmb where iNmb < 1005)
select iNmb, 'SomeText' as num_ber from resNmb;

Add a primary key:

alter table matviewtab add constraint matviewtab_pk primary key (iNmb);

Then create a materialized view log to capture the changes on the base table:

CREATE MATERIALIZED VIEW LOG ON matviewtab WITH PRIMARY KEY, ROWID;

Now let's create the materialized view with the REFRESH FAST ON COMMIT option:

CREATE MATERIALIZED VIEW matviewview
NOLOGGING
CACHE
BUILD IMMEDIATE
REFRESH FAST ON COMMIT
AS SELECT * from matviewtab;

Check the content:

select * from matviewview order by inmb desc;

(only the first 10 of the 1005 rows are shown)

INMB NUM_BER
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText
997 SomeText
996 SomeText

Insert a row, commit, and check the content again:

insert into matviewtab values(1006,'OtherTxt');
commit;


select * from (select * from matviewview order by inmb desc) where rownum < 11 order by inmb desc;

INMB NUM_BER
1006 OtherTxt
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText
997 SomeText

Insert another row (without committing yet) and check for changes in the dictionary view user_tab_modifications and in the materialized view log MLOG$_matviewtab (MLOG$_<table_name>):

insert into matviewtab values(1007,'OtherTxt');


select * from user_tab_modifications;

TABLE_NAME PARTITION_NAME SUBPARTITION_NAME INSERTS UPDATES DELETES TIMESTAMP TRUNCATED DROP_SEGMENTS
BIN$TPmrAsfFAN7gU6wZ4BSJ9w==$0     2 0 0 12.04.2017 16:00:54 NO 0


select * from MLOG$_matviewtab;

INMB M_ROW$$ SNAPTIME$$ DMLTYPE$$ OLD_NEW$$ CHANGE_VECTOR$$ XID$$
1007 AAFaYQAQAAAAAA9AAB 01.01.4000 00:00:00 I N FE 3096379362658352


commit;


select * from (select * from matviewview order by inmb desc) where rownum < 11 order by inmb desc;

INMB NUM_BER
1007 OtherTxt
1006 OtherTxt
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText


drop MATERIALIZED VIEW LOG on matviewtab;
drop MATERIALIZED VIEW matviewview;
drop table matviewtab;

Additional info:
Materialized Views: how can I find the number of updates, inserts, and deletes applied during refresh?
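
Related to that question, here is a quick sketch (my own, not taken from the linked article): before the next refresh, the pending changes can be counted per DML type directly in the materialized view log, using the DMLTYPE$$ column shown above:

select dmltype$$ as dml_type, -- 'I' = insert, 'U' = update, 'D' = delete
count(*) as pending_rows
from MLOG$_matviewtab
group by dmltype$$;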

SQL: Densification or Condensing of Date Ranges or History

Condensing/densification or merging of date ranges in one table, taking the chronological order into account.

This is necessary if the table contains only a subset of the columns of the original table: changes in attributes that are not part of our table still create new versions there.
We therefore need to condense/merge the date ranges so that the result contains exactly one row for every change that is visible in our column subset.

/*
Condenses a table with a business key and a history
where IMMEDIATELY CONSECUTIVE rows have identical attribute values.
Such rows are merged into one.
Gaps in the history are preserved!
*/

--drop table CONDENSING_TEST purge;

CREATE TABLE CONDENSING_TEST
(
H_KONTO_S_CID NUMBER, -- primary key
H_KONTO_CID NUMBER, -- business key
C_START_DT DATE,
C_END_DT DATE,
TESTKONTO_FLG NUMBER,
SONDERKONTO_FLG NUMBER,
EXISTIERT_FLG NUMBER,
BEZUG_FLG NUMBER,
BARZAHLUNG_FLG NUMBER
)
TABLESPACE CORE_DTA;


SET DEFINE OFF;
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('1','43481',to_date('28.06.1984 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('06.02.2003 11:49:47','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','1','1');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('2','43481',to_date('06.02.2003 11:49:48','DD.MM.RRRR HH24:MI:SS'),to_date('13.08.2004 14:23:09','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','1','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('3','43482',to_date('03.03.1994 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.2009 23:59:59','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('4','43482',to_date('03.03.1994 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.2009 23:59:59','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('5','43481',to_date('13.08.2004 14:23:10','DD.MM.RRRR HH24:MI:SS'),to_date('13.08.2004 14:23:24','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('6','43481',to_date('13.08.2004 14:23:25','DD.MM.RRRR HH24:MI:SS'),to_date('14.03.2005 09:57:42','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','1','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('7','43481',to_date('14.03.2005 09:57:43','DD.MM.RRRR HH24:MI:SS'),to_date('07.10.2005 09:59:31','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','1','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('8','43481',to_date('07.10.2005 09:59:32','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('9','43482',to_date('01.01.2010 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values ('10','43482',to_date('01.01.2010 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),'0','-1','1','0','0');
commit;

/*
Considered descriptive attributes:
-TESTKONTO_FLG
-SONDERKONTO_FLG
-EXISTIERT_FLG
-BEZUG_FLG
-BARZAHLUNG_FLG
*/
select H_KONTO_CID
,C_START_DT
,C_END_DT
,TESTKONTO_FLG
,SONDERKONTO_FLG
,EXISTIERT_FLG
,BEZUG_FLG
,BARZAHLUNG_FLG
from condensing_test
order by h_konto_cid,C_START_DT;

H_KONTO_CID C_START_DT C_END_DT TESTKONTO_FLG SONDERKONTO_FLG EXISTIERT_FLG BEZUG_FLG BARZAHLUNG_FLG COMMENT
43481 28.06.1984 00:00:00 06.02.2003 11:49:47 0 -1 1 1 1
43481 06.02.2003 11:49:48 13.08.2004 14:23:09 0 -1 1 1 0
43481 13.08.2004 14:23:10 13.08.2004 14:23:24 0 -1 1 0 0
43481 13.08.2004 14:23:25 14.03.2005 09:57:42 0 -1 1 1 0 rows 4/5 must be condensed
43481 14.03.2005 09:57:43 07.10.2005 09:59:31 0 -1 1 1 0 rows 4/5 must be condensed
43481 07.10.2005 09:59:32 31.12.9999 23:59:59 0 -1 1 0 0
43482 03.03.1994 00:00:00 31.12.2009 23:59:59 0 -1 1 0 0 rows 7/8/9/10 must be condensed
43482 03.03.1994 00:00:00 31.12.2009 23:59:59 0 -1 1 0 0 rows 7/8/9/10 must be condensed
43482 01.01.2010 00:00:00 31.12.9999 23:59:59 0 -1 1 0 0 rows 7/8/9/10 must be condensed
43482 01.01.2010 00:00:00 31.12.9999 23:59:59 0 -1 1 0 0 rows 7/8/9/10 must be condensed

-- H_KONTO_CID/HUB_CID corresponds to the business key coming from the business-view logic

with res01 as
(select H_KONTO_CID as hub_cid
,C_START_DT as C_START_DT
,C_END_DT as C_END_DT
,testkonto_flg as attrib1
,lag ( testkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib1_LAG
,lead( testkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib1_LEAD
,sonderkonto_flg as attrib2
,lag ( sonderkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib2_LAG
,lead( sonderkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib2_LEAD
,existiert_flg as attrib3
,lag ( existiert_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib3_LAG
,lead( existiert_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib3_LEAD
,bezug_flg as attrib4
,lag ( bezug_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib4_LAG
,lead( bezug_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib4_LEAD
,barzahlung_flg as attrib5
,lag ( barzahlung_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib5_LAG
,lead( barzahlung_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib5_LEAD
from condensing_test
where h_konto_cid in (43481,43482)
)
,
res02 as
(-- take the row if the current occurrence does not equal the previous occurrence
select HUB_CID
,C_START_DT as CHANGE
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,1 as TAKEROW
from res01
where (attrib1_LAG != attrib1 or attrib1_LAG is null)
or (attrib2_LAG != attrib2 or attrib2_LAG is null)
or (attrib3_LAG != attrib3 or attrib3_LAG is null)
or (attrib4_LAG != attrib4 or attrib4_LAG is null)
or (attrib5_LAG != attrib5 or attrib5_LAG is null)
union all
-- do NOT take the row if something changed afterwards, i.e.
-- if the current occurrence does not equal the following occurrence
select HUB_CID
,C_END_DT as CHANGE
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,0 as TAKEROW
from res01
where (attrib1_LEAD != attrib1 or attrib1_LEAD is null)
or (attrib2_LEAD != attrib2 or attrib2_LEAD is null)
or (attrib3_LEAD != attrib3 or attrib3_LEAD is null)
or (attrib4_LEAD != attrib4 or attrib4_LEAD is null)
or (attrib5_LEAD != attrib5 or attrib5_LEAD is null)
)
,
res03 as
(
select HUB_CID
,CHANGE as C_START_DT
,lead( CHANGE, 1 ) over (partition by HUB_CID order by CHANGE) as C_END_DT -- HUB_CID instead of TABLE_CID
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,TAKEROW
from res02)
select HUB_CID as H_KONTO_CID
,C_START_DT as C_START_DT
,C_END_DT as C_END_DT
,0 AS c_audit_cid
,attrib1 as testkonto_flg
,attrib2 as sonderkonto_flg
,attrib3 as existiert_flg
,attrib4 as bezug_flg
,attrib5 as barzahlung_flg
from res03
where TAKEROW = 1
order by HUB_CID, C_START_DT;
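
With the test rows above, the condensing query should return six rows instead of ten: the two rows marked 'rows 4/5' collapse into one, as do the four rows marked 'rows 7/8/9/10'. Expected result, derived from the annotations above (only the key and date columns shown, not pasted from an actual run):

/*
H_KONTO_CID C_START_DT          C_END_DT
43481       28.06.1984 00:00:00 06.02.2003 11:49:47
43481       06.02.2003 11:49:48 13.08.2004 14:23:09
43481       13.08.2004 14:23:10 13.08.2004 14:23:24
43481       13.08.2004 14:23:25 07.10.2005 09:59:31  -- rows 4/5 condensed
43481       07.10.2005 09:59:32 31.12.9999 23:59:59
43482       03.03.1994 00:00:00 31.12.9999 23:59:59  -- rows 7/8/9/10 condensed
*/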

EXECUTE IMMEDIATE with USING and OUT Parameter

If you want to use an OUT parameter in an EXECUTE IMMEDIATE statement that runs an anonymous PL/SQL block, here is an example:

create table in_out_test ( num_ber number);


set serveroutput on;
DECLARE
vc_sql CLOB;
p_insert_count NUMBER;
BEGIN
vc_sql := '
DECLARE
cursor curData is
with resNmb(iNmb) as
(select 1 as iNmb from dual
union all
select iNmb + 1 from resNmb where iNmb < 1005
)
select iNmb as num_ber from resNmb;
type typeTblData is table of curData%ROWTYPE;
tblData typeTblData;
iterator NUMBER := 0;
insert_count NUMBER;
begin
open curData;
loop
fetch curData bulk collect into tblData limit 100;
exit when tblData.COUNT = 0;
forall ii in tblData.FIRST..tblData.LAST
INSERT INTO in_out_test
values tblData(ii) ;
insert_count := (iterator*100)+Sql%Rowcount;
:1 := insert_count;
commit;
iterator := iterator +1;
end loop;
close curData;
exception
when others then
rollback;
if curData%ISOPEN then close curData; end if;
raise;
end;
';
execute immediate vc_sql using out p_insert_count;
dbms_output.put_line('IN OUT PARAMETER: '||p_insert_count||' rows inserted');
END;
/

>> PL/SQL procedure successfully completed.
>> IN OUT PARAMETER: 1005 rows inserted
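
The same mechanism also works with a mix of IN and OUT binds; the binds are matched by position, in the order in which the placeholders first appear in the dynamic block. A minimal sketch (the query against user_objects is just for illustration):

set serveroutput on;
DECLARE
v_count NUMBER;
BEGIN
-- :b_in appears first in the dynamic block, :b_out second,
-- so the USING clause lists the IN bind before the OUT bind
EXECUTE IMMEDIATE '
DECLARE
v_local NUMBER;
BEGIN
SELECT COUNT(*) INTO v_local FROM user_objects WHERE object_id < :b_in;
:b_out := v_local;
END;'
USING IN 100, OUT v_count;
dbms_output.put_line('Objects with object_id < 100: ' || v_count);
END;
/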

drop table in_out_test purge;

Error handling in BULK INSERT FORALL

Error handling in BULK INSERT FORALL with error_index and SQL%BULK_EXCEPTIONS

Additional info:
To avoid termination after an error, by Steven Feuerstein


select * from user_source;
desc user_source;

create table bulk_error_test as select * from user_source where 1=0;

-- add a check constraint to provoke an error
alter table bulk_error_test add constraint bulk_error_test_chk check (line > 1);
-------------------------------------------
set serveroutput on;
DECLARE
type typeTblData is table of bulk_error_test%ROWTYPE;
tblData typeTblData;
type typeTblID is table of integer;
tblID typeTblID;
bulk_error exception;
PRAGMA EXCEPTION_INIT (bulk_error, -24381);
nCnt number:=0;
cursor curData is select name,type,line,text from user_source;
BEGIN

OPEN curData;
LOOP
fetch curData bulk collect into tblData limit 500;
exit when tblData.COUNT = 0;

forall ii in tblData.FIRST..tblData.LAST save exceptions
insert into bulk_error_test values tblData(ii);

nCnt:=nCnt+sql%rowcount;

commit;
END LOOP;

CLOSE curData;

dbms_output.put_line ('Number of inserts: ' || nCnt);

EXCEPTION

when bulk_error then
dbms_output.put_line('bulk_error');
for jj in 1 .. sql%bulk_exceptions.count
loop
dbms_output.put_line(
substr(
' Name: '||tblData(SQL%BULK_EXCEPTIONS(jj).error_index ).name ||
' Type: '||tblData(SQL%BULK_EXCEPTIONS(jj).error_index ).type ||
' Line: '||tblData(SQL%BULK_EXCEPTIONS(jj).error_index ).line ||
' Error_Index-ID:'||to_char(SQL%BULK_EXCEPTIONS(jj).error_index)||
' ERROR: '||sqlerrm(-SQL%BULK_EXCEPTIONS(jj).error_code),1,2000)||
' ERROR: '||DBMS_UTILITY.FORMAT_ERROR_STACK
);
end loop;
if curData%ISOPEN then close curData; end if;
raise;

when others then
dbms_output.put_line('others');
dbms_output.put_line(DBMS_UTILITY.FORMAT_ERROR_STACK);
raise;
END;

/*
Error report -
ORA-24381: error(s) in array DML
ORA-06512: at line 47
24381. 00000 - "error(s) in array DML"
*Cause: One or more rows failed in the DML.
*Action: Refer to the error stack in the error handle.
bulk_error
Name: UT_UTREPORT Type: PACKAGE BODY Line: 1 Error_Index-ID:1 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
Name: UT_UTREPORT Type: PACKAGE Line: 1 Error_Index-ID:148 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
Name: UT_UTOUTPUT Type: PACKAGE Line: 1 Error_Index-ID:157 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
Name: UTOUTCOME Type: PACKAGE Line: 1 Error_Index-ID:198 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
Name: UT_UTOUTPUT Type: PACKAGE BODY Line: 1 Error_Index-ID:253 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
Name: UTPIPE Type: PACKAGE BODY Line: 1 Error_Index-ID:471 ERROR: ORA-02290: check constraint (.) violated ERROR: ORA-24381: error(s) in array DML
*/

select * from bulk_error_test;

truncate table bulk_error_test drop storage;

drop table bulk_error_test purge;

ORACLE-View containing 'select * from table'

A view defined as 'select * from table' selects only the columns that existed at creation time.
Columns added to the base table afterwards are not picked up by the view!
The view nevertheless stays valid.

drop view viewselectstern;
drop table tableselectstern;

create table tableselectstern
(
id number,
inhalt1 varchar2 (50),
inhalt2 varchar2 (50),
inhalt3 varchar2 (50)
);
insert into tableselectstern values (1,'eins','zwei','drei');
commit;

create or replace view viewselectstern as select * from tableselectstern;

select table_name,column_name,column_id from user_tab_cols where table_name = 'TABLESELECTSTERN' order by column_id;
/*
TABLE_NAME COLUMN_NAME COLUMN_ID
TABLESELECTSTERN ID 1
TABLESELECTSTERN INHALT1 2
TABLESELECTSTERN INHALT2 3
TABLESELECTSTERN INHALT3 4
*/
select table_name,column_name,column_id from user_tab_cols where table_name = 'VIEWSELECTSTERN' order by column_id;
/*
TABLE_NAME COLUMN_NAME COLUMN_ID
VIEWSELECTSTERN ID 1
VIEWSELECTSTERN INHALT1 2
VIEWSELECTSTERN INHALT2 3
VIEWSELECTSTERN INHALT3 4
*/
select * from viewselectstern;
/*
ID INHALT1 INHALT2 INHALT3
1 eins zwei drei
*/
alter table tableselectstern add inhalt4 varchar2(50);

select * from viewselectstern;
/*
ID INHALT1 INHALT2 INHALT3
1 eins zwei drei
*/

select table_name,column_name,column_id from user_tab_cols where table_name = 'TABLESELECTSTERN';
/*
TABLE_NAME COLUMN_NAME COLUMN_ID
TABLESELECTSTERN ID 1
TABLESELECTSTERN INHALT1 2
TABLESELECTSTERN INHALT2 3
TABLESELECTSTERN INHALT3 4
TABLESELECTSTERN INHALT4 5
*/
select table_name,column_name,column_id from user_tab_cols where table_name = 'VIEWSELECTSTERN' order by column_id;
/*
TABLE_NAME COLUMN_NAME COLUMN_ID
VIEWSELECTSTERN ID 1
VIEWSELECTSTERN INHALT1 2
VIEWSELECTSTERN INHALT2 3
VIEWSELECTSTERN INHALT3 4
*/
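
To make the view pick up the new column, the view has to be recreated; simply re-running the CREATE OR REPLACE statement re-expands the '*' with the current column list:

create or replace view viewselectstern as select * from tableselectstern;
select * from viewselectstern;
-- the result now contains INHALT4 as well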

SQL Developer 4.2 Early Adopter

For a few days now it has been possible to download the new SQL Developer 4.2 as an Early Adopter release.

Information about the new features can be found here at ORACLE or at ThatJeffSmith.

Here is the list of new features from ORACLE:

  • In-Line Editing: simply arrow around your current statement or script without needing to invoke an external editor
  • Automatic Formatting: SQL output can be formatted to fit your screen or to CSV, JSON, Delimited, HTML, Insert statements, or XML
  • New Commands: DDL, CTAS, REPEAT, INFO (an updated version of DESC), and many more. Use the HELP command for a full list (see the short sketch after this list).
  • Client-Side Scripting: Use JavaScript in combination with SQL and PL/SQL
  • SQL History: Records previous 100 executed statements and scripts and can be recalled with up and down arrow keys
  • Tab Completion: Get assistance with object names, keywords, and more with the TAB key
  • Oracle Database Exadata Express Cloud Service support
  • New command, SET ENCODING. Example, SET ENCODING UTF-8, allows for reading a file for a certain encoding
  • SSHTUNNEL now allows the user to change the default SSH port 22. Example: username@hostname:port
  • INFO and CTAS commands now support TAB completion for object names.
  • SET WALLET no longer requires a password for the wallet.
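
For example, with one of the tables from the posts above, some of the new worksheet commands can be tried out directly (a quick sketch; INFO and DDL take an object name, just like DESC):

info matviewtab
ddl matviewtab
help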

Data Vault: Possibilities to add a new descriptive Attribute

Problem: the source system added a new descriptive attribute to a source table.
We need to include the new data in our Raw Data Vault (satellite) world.

We have two possibilities:

  1. Add a new hub satellite which contains, besides the metadata, only the new attribute, starting from the time the attribute appeared.
  2. Stop filling the current hub satellite and fill a new satellite which is a structural copy of the old satellite plus the new attribute, starting from the time the attribute appeared.

Case 1: I think this is the normal case. The existing dataflow, based on the data without the new attribute, is untouched and still makes sense for the business, so there is no need to change or announce anything concerning the dataflow or any report based on the information without the new attribute.
We can develop an extended dataflow containing the new attribute and a new report which is very similar to the 'old' report without the new attribute.
After successful completion we can mark the old report as 'deprecated' if there is no need to keep it, because the new report, containing the new attribute, covers the old situation too. After a given period of time the business should use only the new report, and the old one can be removed.
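
A minimal sketch of case 1, using hypothetical names modelled on the column style of the condensing example above (S_KONTO_NEW_ATTR as the new satellite, NEW_ATTRIBUTE as the column added by the source system):

create table S_KONTO_NEW_ATTR
(
H_KONTO_CID NUMBER, -- reference to the hub
C_START_DT DATE, -- load / valid-from date
C_END_DT DATE, -- valid-to date
C_AUDIT_CID NUMBER, -- load audit reference
NEW_ATTRIBUTE VARCHAR2(100) -- the new descriptive attribute from the source system
);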

Case 2: This situation is completely different: the old satellite is no longer filled after the appearance of the new attribute. So the old dataflow and the reports based on the old satellite show data only up to the arrival of the new attribute data.
This can be necessary if the new attribute is a game changer concerning the old characteristics of the data, so that the old report no longer makes sense once the new attribute is known.
The old report then shows only the data from before, and a new report shows only the data from the date the new attribute values became available.

It is of course possible to connect the old and the new satellite, by a view for example, but then we have the problem of the values for the new attribute in the history…

Any thoughts on this topic are appreciated!

ORA-04068: Existing state of packages has been discarded

To avoid an exception like the following after changing a package/procedure/function/…

ERROR at line 1:
ORA-04068: existing state of packages has been discarded
ORA-04061: existing state of package body "<SCHEMA.PACKAGE_NAME>" has been invalidated
ORA-04065: not executed, altered or dropped package body "<SCHEMA.PACKAGE_NAME>"
ORA-06508: PL/SQL: could not find program unit being called: "<SCHEMA.PACKAGE_NAME>"
ORA-06512: at "<SCHEMA.DB_OBJECT>", line xxx
ORA-06512: at line 1

it is necessary to:

  • disconnect and reconnect
    or
  • execute sys.dbms_session.reset_package (see the sketch below)

before running the same procedure/function again.
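
For the second option, a quick sketch (run it in the session that got the ORA-04068, then call the changed package again):

begin
sys.dbms_session.reset_package;
end;
/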

More information can be found here.

ORACLE Interval-Partitioning with NUMBER

ORACLE 11.2
We needed list partitioning with one partition per load of a DWH, but of course we wanted the comfort of automatic partition generation.
This can be achieved with interval partitioning, not on a DATE but on a NUMBER column.
It was not easy to find an example, so I want to show a simple table for which every new load (more precisely: every insert with a new C_LADE_LAUF_CID) generates a new partition.
We used PCTFREE 0 (of course on partition level) to avoid unused space, because there are no updates on this table.
We set the initial extent to 1M to avoid many partitions each allocating unnecessarily large storage when the partition is created.

To change the behavior of the large initial extent (8M instead of 64K) for partitions, beginning with ORACLE 11.2.0.2, on system level, read this.

drop table INTERVAL_NUMBER purge;
CREATE TABLE interval_number (
C_LADE_LAUF_CID   NUMBER(5)
,begdate           DATE
,enddate           DATE
,description       VARCHAR2(100)
)
PARTITION BY RANGE (C_LADE_LAUF_CID) INTERVAL (1)
( PARTITION P1 VALUES LESS THAN (2) ) PCTFREE 0 STORAGE (INITIAL 1M)
;


insert into interval_number values (1,sysdate,sysdate,'partition 1');
insert into interval_number values (2,sysdate,sysdate,'partition 2');
insert into interval_number values (3,sysdate,sysdate,'partition 3');
commit;

After generation of statistics, we can check what happened:

exec DBMS_STATS.GATHER_TABLE_STATS (ownname => 'CORE_TEST',tabname => 'interval_number',estimate_percent => dbms_stats.auto_sample_size);


Query for the partitions:
select segment_name, partition_name, blocks, bytes/1024 as KB
from dba_segments
where segment_name = 'INTERVAL_NUMBER';

SEGMENT_NAME    PARTITION_NAME BLOCKS KB

INTERVAL_NUMBER P1             128 1024
INTERVAL_NUMBER SYS_P12666     128 1024
INTERVAL_NUMBER SYS_P12667     128 1024


Query for the complete table
(PCTFREE is set on partition level, so it is null here):
select table_name, pct_free, num_rows, partitioned
from user_tables
where table_name = 'INTERVAL_NUMBER';

TABLE_NAME      PCT_FREE NUM_ROWS PARTITIONED

INTERVAL_NUMBER null     3        YES


Query for the PCTFREE value on partition level:
select def_pct_free
from USER_PART_TABLES
where table_name = 'INTERVAL_NUMBER';
DEF_PCT_FREE
0

Query for the initial extent of the partitions:
select partition_name, initial_extent/1024 as initial_extent_kb
from user_tab_partitions
where table_name = 'INTERVAL_NUMBER';
PARTITION_NAME INITIAL_EXTENT_KB
P1              1024
SYS_P13229      1024
SYS_P13230      1024
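
To check which rows ended up in which automatically created partition, the extended partition syntax can be used (a quick sketch):

select * from interval_number partition for (2);
-- returns only the row(s) of the partition that contains C_LADE_LAUF_CID = 2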