-- ============================================================================
-- ORACLE DB - snippets, tips, best practices
-- Source gist: rmorenobello/fcc34781531fa6f65b41f99e80759d13
-- Summaries with examples for each topic.
-- References:
--   http://www.morganslibrary.org/library.html
--   https://lalitkumarb.wordpress.com/2014/05/31/oracle-explain-plan/
-- ============================================================================
-- EXPLAIN PLAN FOR
-- To inspect the resulting plan:
-- EXPLAIN PLAN FOR
-- per consultar resultat:
set linesize 132;
SELECT * FROM TABLE(dbms_xplan.display);
-- MULTI HILO/threading (implica varias conexiones externas recogiendo datos => no es lo mismo que PARALLEL)
-- multi-threading distribution for Oracle
-- ej. en PDI kettle => si te capan vel / conexión, multiplicas al usar varias conexiones:
AND ORA_HASH(OPERACIONEXT_ID, (${tableInputNumOfCopies} - 1) ) = ${Internal.Step.CopyNr}
------ A recordar ------
-- NULL no es comparable => no cumple queries como:
-- columna = <condicion> OR columna != <condicion>
-------------------DBMS_ERRLOG-----------------------------
/* Va haciendo la operacion fila a fila excepto las filas que dan error,
a diferencia de la ejecución normal en que se cancela toda la operación.
*/
-- DBMS_ERRLOG - logar errores con detalle:
-- https://oracle-base.com/articles/10g/dml-error-logging-10gr2
-- Create error logging table. Default name.
-- Create the error-logging table for DEST with the default name (ERR$_DEST).
begin
dbms_errlog.create_error_log (dml_table_name => 'dest');
end;
/
-- creates same name with "ERR$_" prefix.
-- example:
-- Example: rows that fail go to ERR$_DEST instead of aborting the statement.
insert into dest
select * from source
log errors into err$_dest ('INSERT') reject limit 100; -- aborts after 100 bad rows; use REJECT LIMIT UNLIMITED to never abort
-- For an UPDATE use the tag ('UPDATE'), etc. (the tag is stored in column ORA_ERR_TAG$)
-------------------------------------------------------------
-------------------- FLAG COLUMNS ----------------
-- https://asktom.oracle.com/pls/apex/f?p=100:11:0::::P11_QUESTION_ID:6263249199595
-- Tom Kyte, on why Oracle has no BOOLEAN column type: since
--     ...,
--     flag char(1) check (flag in ( 'Y', 'N' )),
--     ...,
-- "serves the same purpose, requires the same amount of space and does the same
-- thing - I guess we feel this is a feature we can let them have that we really
-- don't need. If you would like TRUE/FALSE, we can accomplish that easily with
-- DECODE(flag,'Y','TRUE','N','FALSE')"
----------------------------------- STRING functions ----------------------
-- *** SPLIT / separar una sola string separada por comas en varias filas
-- https://lalitkumarb.wordpress.com/2014/12/02/split-comma-delimited-string-into-rows-in-oracle/
-- *** SPLIT / separar todas las string con valores separados por comas de una columna en varias filas
-- https://lalitkumarb.wordpress.com/2015/03/04/split-comma-delimited-strings-in-a-table-in-oracle/
---------- NUMBERS -----
-- keep only n leftmost digits
select trunc(20161231/10000) from dual;
-- keep only n rightmost digits
select MOD(20161231,10000) from dual;
-- LONG type (SQL, no el de PL/SQL)
-- NOTE: PL/SQL has a LONG datatype of its own, but is actually a subtype defined as a VARCHAR2(32767).
-- http://www.oracle-developer.net/display.php?id=430
-- 4 methods to work with ORACLE DDL LONG data type (also if >32765 "chars")
/*
Creando funcion pipelined, que simula una tabla al vuelo
SQL> CREATE TYPE nullability_ot AS OBJECT
2 ( table_name VARCHAR2(30)
3 , column_name VARCHAR2(30)
4 , search_condition CLOB <-- el cursor convierte LONG a VARCHAR2; probar si se puede definir como VARCHAR2
5 );
6 /
SQL> CREATE TYPE nullability_ntt AS TABLE OF nullability_ot;
2 /
SQL> CREATE FUNCTION nullability_pipelined RETURN nullability_ntt PIPELINED AS
2 BEGIN
3 FOR r IN (SELECT * FROM nullability_view) LOOP
4 PIPE ROW ( nullability_ot(r.table_name,
5 r.column_name,
6 r.search_condition) );
7 END LOOP;
8 RETURN;
9 END;
10 /
SQL> SELECT *
2 FROM TABLE(nullability_pipelined)
3 WHERE UPPER(search_condition) LIKE '%IS NOT NULL%';
*/
-- ***** FILTERING RESULTS *****
-- * Filter rows with "Infinity" values (resulting from X/0, etc.)
WHERE col is infinite;
where col = binary_double_infinity;
---------------------- CREATE TABLE ---------------------------
-- global TEMPORARY tables (server managed)
-- son privadas, existen sólo en la sesión.
-- If the TRUNCATE statement is issued against a temporary table, only the session specific data is truncated. There is no affect on the data of other sessions.
CREATE GLOBAL TEMPORARY TABLE my_temp_table (
id NUMBER,
description VARCHAR2(20)
)
-- Se puede escoger que al hacer commit NO se borren sus filas con:
ON COMMIT PRESERVE ROWS;
-- ********* COPY / BACKUP TABLE *****
-- Use NOLOGGING: This turns off logging, making for a faster copy. However, if you must recover, you will not be able to roll-forward through this operation.
-- Use the parallel clause: Since a table copy does a full table scan of both tables, parallel query will make the copying far faster, (up to cpu_count-1 faster).
-- In this example we fast copy data from one table to another table on a 16 CPU server:
create table
newtab
parallel 15 nologging
as
select /*+parallel(source 15) */ *
from
oldtab;
-- *** INDEXES
-- BITMAP indexes -----------------------------------------------------------------------
-- http://www.dba-oracle.com/oracle_tips_bitmapped_indexes.htm
-- When to use:
-- * Required:
-- 1. low cardinality ( < 100 distinct, a ser posible < 10)
-- 2. LOW DML: low insert/update/delete activity. Update bitmap index takes a lot of resources.
-- Best for largely read-only tables and tables batched updated nightly.
-- * Not required but greater performance boost:
-- 3. Queries against multiple low cardinality columns => create bitmap for each column involved => great performance.
-- Troubleshooting:
-- 1. Small table - The CBO may force a full-table scan if your table is small!
-- 2. Bad stats - Make sure you always analyze the bitmap with dbms_stats right after creation:
CREATE BITMAP INDEX emp_bitmap_idx ON index_demo (gender);
-- execute statistics:
exec dbms_stats.gather_index_stats(OWNNAME=>'SCOTT', INDNAME=>'EMP_BITMAP_IDX');
-- 3. Test with a hint - To force the use of your new bitmap index, just use a Oracle INDEX hint:
select /*+ index(emp emp_bitmap_idx) */
count(*)
from emp, dept
where emp.deptno = dept.deptno;
CREATE INDEX "BISLT_INT_H"."IDX_FT_HC3_ACC_WS_1" ON "BISLT_INT_H"."FT_HC3_ACC_WS" ("PWS_DAL")
PARALLEL (DEGREE 4) TABLESPACE "BISLT_INT_H_DAT" ;
-----------------------------------------------------------------------------------------------------------------
-- Crear PK de forma independiente a la creación de la tabla:
ALTER TABLE "DWH_GCI"."DT_OPE_TARJETA" ADD CONSTRAINT "DT_OPE_TARJETA_PK" PRIMARY KEY ("ID_TAR_FECHA_OPE_TARJETA", "ID_TAR_OPE_TARJETA")
USING INDEX TABLESPACE DWH_GCI_INX LOCAL;
SELECT * FROM DWH_ADM.DWH_ADM_STATUS_CONSTRAINTS;
SELECT * FROM ALL_CONSTRAINTS WHERE TABLE_NAME LIKE '%TARJETA';
SELECT I.STATUS, I.* FROM ALL_INDEXES I WHERE TABLE_NAME = 'DT_OPE_TARJETA'; -- N/A si esta a nivel local de cada particion, VALID si esta ok como GLOBAL.
SELECT DISTINCT I.STATUS, I.INDEX_NAME FROM ALL_IND_PARTITIONS i WHERE INDEX_NAME LIKE 'DT_OPE_TARJETA%' AND STATUS <> 'USABLE'; -- a nivel partición. USABLE las que estan OK.
-- IMPORTANTE: no poner el esquema!! sólo nombre tabla y lanzar en esquema correspondiente.
-- Disable constraints and indexes before a bulk load, using the project
-- helper package DWH_ADM.PKG_ADM.
BEGIN
-- IMPORTANT: the constraint must be ENABLED when this runs, because the
-- re-enable step restores whatever state was saved at disable time
-- (the state is stored in DWH_ADM.DWH_ADM_STATUS_CONSTRAINTS).
dwh_adm.pkg_adm.deshabilitar_constraints('DT_OPE_TARJETA');
dwh_adm.pkg_adm.ejecutar_sql('ALTER INDEX DT_OPE_TARJETA_PK UNUSABLE'); -- disabling constraints currently does NOT disable their index, and disabling indexes does NOT disable the UNIQUE ones
dwh_adm.pkg_adm.deshabilitar_indices('DT_OPE_TARJETA');
END;
/
-- Re-enable indexes and constraints after the bulk load.
begin
DWH_ADM.PKG_ADM. RECONSTRUIR_INDICES_NO_USABLES (); -- rebuild partitioned/local indexes left UNUSABLE
dwh_adm.pkg_adm.rehabilitar_constraints('DT_OPE_TARJETA');
-- dwh_adm.pkg_adm.rehabilitar_indices('DT_OPE_TARJETA'); -- does not seem to work for partitioned/local indexes
end;
/
-- ** Change GLOBAL index to LOCAL:
-- https://dba.stackexchange.com/questions/24393/oracle-11-change-global-index-to-local-on-huge-production-table
-- NOTA: si es indice de PK alter table drop constraint primero
-- I found solution making creating index quite faster... (on Oracle 11g works fine)
drop index I_EMPLOYEE_SALARY_T;
create index I_EMPLOYEE_SALARY_T on EMPLOYEE (SALARY, DEPARTAMENT) tablespace IDX_TABLESPACE local unusable;
-- Then I need to rebuild index online choosing partition:
ALTER INDEX I_EMPLOYEE_SALARY_T REBUILD PARTITION partition_name online;
-- Altering index is much faster because it using different, faster mechanism and data can be accessed much sooner. For example when data for employees (in this example) is partitioned by timestamp. So newer data are more quickly available.
-- *********** CONSTRAINTS (PK, FK, etc.) *************
SELECT C.CONSTRAINT_NAME, C.CONSTRAINT_TYPE, c.STATUS, c.* FROM USER_CONSTRAINTS C
WHERE TABLE_NAME = 'DT_OPE_TARJETA_LIQUIDACION';
-- *** PRIMARY KEYS ****
-- No requiere un UNIQUE INDEX para cumplir; puede usar un NON-UNIQUE para ver si ya existe un valor (p.ej. si la hacemos DEFERRABLE).
-- NON-VALIDATE le permite ignorar duplicados ya existentes.
-- HINT para ignorar dups al hacer un insert:
insert /*+ ignore_row_on_dupkey_index(t_unique_cust, t_unique_cust_pk_idx) */ into t_unique_cust
(select * from t_non_unique_cust);
-- Para drop PK Y SUS ÍNDICES hayan o no sido creados a la vez:
ALTER TABLE DT_OPE_TARJETA DROP PRIMARY KEY DROP INDEX;
-- NOTA: Si la PK fue creada cuando YA existía el índice asociado, el índice NO será dropped al hacer drop de la PK sin DROP INDEX.
-- si no queremos perder el UNIQUE INDEX asociado.
ALTER TABLE DT_OPE_TARJETA DROP PRIMARY KEY KEEP INDEX;
-- Activar/desactivar constraints anónimas de cierto tipo
BEGIN
    -- Disable every anonymous NOT NULL check constraint on table TESTXX.
    FOR rec IN (
        SELECT table_name, constraint_name
        FROM   user_constraints
        WHERE  table_name = 'TESTXX'
        AND    search_condition_vc LIKE '%NOT NULL%'
    ) LOOP
        EXECUTE IMMEDIATE
            'alter table ' || rec.table_name
            || ' disable constraint ' || rec.constraint_name;
    END LOOP;
END;
-- modificar estado constraint
ALTER TABLE DWH_GCI.DT_OPE_TARJETA_LIQUIDACION MODIFY CONSTRAINT DT_OPE_TARJETA_LIQUIDACION_PK ENABLE USING INDEX TABLESPACE DWH_GCI_INX LOCAL; -- RELY , VALIDATE, etc.
-- estado guardado de PKG_ADM para REHABILITAR_CONSTRAINTS: (G. Caja Ingenieros)
select *
FROM DWH_ADM.DWH_ADM_STATUS_CONSTRAINTS
WHERE OWNER = 'DWH_GCI'
--AND CONSTRAINT_NAME = P_CONSTRAINT_NAME
AND TABLE_NAME IN ('DT_OPE_TARJETA_LIQUIDACION','DT_MES_TARJETA','AG_MES_TARJETA_12M');
-- ENABLE and DISABLE a DEFAULT value
ALTER TABLE DWH_GCI.DT_OPE_TARJETA MODIFY ID_OPE_TARJETA DEFAULT "DWH_GCI"."DT_OPE_TARJETA_SEQ"."NEXTVAL";
-- CUIDADO!!!! no hace falta DROP DEFAULT para insertar valores arbitrarios; sólo toma valor de la secuencia si NO le enviamos un valor!!
-- ==> cargas RC sólo tienes que borrar tabla y enviarle informado el campo PK!!!!
ALTER TABLE Employees MODIFY Salary DROP DEFAULT;
-- or DEFAULT NULL, pero creo que más limpio internamente el DROP (no deja el default_length informado, imagino)
-- *********** SEQUENCES ********** (recordar las nuevas IDENTITY, auto-incrementales más rápidas que trigger+sequence, y más rapido aún los DEFAULT y sin tener que desactivar para cargas completas)
-- hacer avanzar una sequence N numeros (en este caso select max(....)
-- connect by genera tantas filas como indique el <= así que hacemos avanzar ese num de fila desde el valor que tuviera
-- Current maximum key: the recreated sequence should start just above this value.
SELECT NVL(MAX(ID_OPE_TARJETA), 0) FROM DWH_GCI.DT_OPE_TARJETA;
DROP SEQUENCE "DWH_GCI"."DT_OPE_TARJETA_SEQ";
-- BUG FIX: the original used START WITH 0 together with MINVALUE 1, which
-- raises ORA-04006 (START WITH cannot be less than MINVALUE). Start at 1,
-- or at MAX(ID_OPE_TARJETA) + 1 taken from the query above.
CREATE SEQUENCE "DWH_GCI"."DT_OPE_TARJETA_SEQ" MINVALUE 1 INCREMENT BY 1 START WITH 1 CACHE 1000 ORDER;
-- *** Si se usa como valor DEFAULT de un campo autonumérico:
-- No hace falta DROP DEFAULT para insertar valores arbitrarios; sólo toma valor de la secuencia si NO le enviamos un valor!!
-- ==> cargas RC sólo tienes que borrar tabla y enviarle informado el campo PK!!!!
-- Restart sequence (drop si existe + create):
-- Restart a sequence: drop it if it already exists, then recreate it and
-- re-grant the application/consumer roles.
DECLARE
EXISTS_SEQ INTEGER := 0; -- number of matching sequences (0 or 1)
BEGIN
-- Does the sequence already exist for this owner?
SELECT count(*) cnt into EXISTS_SEQ FROM ALL_SEQUENCES
where sequence_owner='BISLT_INT_D' and SEQUENCE_NAME = 'D_MAC_ACREDITACIO_TUTELA_SEQ';
IF EXISTS_SEQ>0 THEN
-- restart sequence: dropping it lets the CREATE below start over at 1
EXECUTE IMMEDIATE 'DROP SEQUENCE D_MAC_ACREDITACIO_TUTELA_SEQ';
END IF;
EXECUTE IMMEDIATE 'CREATE SEQUENCE D_MAC_ACREDITACIO_TUTELA_SEQ MINVALUE 1 INCREMENT BY 1 START WITH 1 CACHE 20 ORDER NOCYCLE';
-- Grants do not survive the DROP, so they must be re-issued.
EXECUTE IMMEDIATE'GRANT SELECT, ALTER ON D_MAC_ACREDITACIO_TUTELA_SEQ TO BISLT_ADM_APLC_ROLE';
EXECUTE IMMEDIATE'GRANT SELECT ON D_MAC_ACREDITACIO_TUTELA_SEQ TO BISLT_ADM_CONS_ROLE';
END;
-- (cleanup: removed the unused local variable sql_stmt from the original)
-- CREACION de una secuencia con CONNECT BY level
-- con DATE
select to_char(dt + (level - 1),'yyyyMMdd' ) LOAD_ID
, 'DAV' DATAMART_ID
, 'MAC' SCOPE_ID
, 'Càrrega any '|| EXTRACT(YEAR from (dt + (level - 1)) ) ||' Mòdul Assignació Credencials (MAC)' DESCRIPTION
, 'OPS$DPFPROD' CREATION_USER
, SYSDATE CREATION_TIME
from ( select to_date('20100101','yyyyMMdd') dt from dual )
connect by dt + (level - 1) <= to_date('20500101','yyyyMMdd');
-- *********** COMMENTS ************
-- COMMENTS with line breaks (just create them with them in the script):
-- NOTA: No sé si es necesario que sean tipo Windows \r\n (funciona con ORACLE, probado) o UNIX \n también ok.
-- Excel: =CONCAT("COMMENT ON COLUMN ";$A2;".";$B2;" IS '";SUSTITUIR($G2;"'";"''");"';")
-- To see them with their line breaks properly presented:
SELECT * FROM USER_COL_COMMENTS
WHERE TABLE_NAME = 'FACT_IND_CONTRATO_HST'
AND COMMENTS IS NOT NULL;
-- Query Results WON'T show the line breaks ("Run Sentence" or Ctrl+Enter).
-- You'll ONLY get exactly what you're looking for in SCRIPT OUTPUT (i.e., highlight the query, right-click, select "Run Script" or F5):
-- TABLE_NAME COLUMN_NAME COMMENTS
-- ---------- ----------- --------------
-- MYTABLE MYCOLUMN Line 1
-- Line 2
-- Line 3
-- MYTABLE OTHERCOLUMN Other comments
-- ******* TRUNCATE **********
-- TRUNCATE PARTITION
-- to get the partition names against the dates
select t.dt, uo.subobject_name from tst_summary t, user_objects uo where dbms_rowid.rowid_object(t.rowid) = uo.object_id;
-- TRUNCATE PARTITION
ALTER TABLE TST_SUMMARY TRUNCATE PARTITION SYS_12;
-- ***** TRUNCATE PARTITION FOR ******
-- TRUNCATE partition which contains value
/*
SQL> select partition_name,high_value from user_tab_partitions where table_name = 'RANGE_SALES';
PARTITION_NAME HIGH_VALUE
------------------------------ --------------------------------------------------------------------------------
SALES_Q1_1998 TO_DATE(' 1998-04-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIA
SALES_Q1_1999 TO_DATE(' 1999-04-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIA
SALES_Q1_2000 TO_DATE(' 2000-04-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIA
SALES_Q2_1998 TO_DATE(' 1998-07-01 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIA
SQL> select count(*)
2 from range_sales partition(sales_q1_2000)
3 /
*/
-- TRUNCATE partition which contains date 2000-02-15:
-- si es campo DATE:
alter table range_sales
TRUNCATE PARTITION FOR(date '2000-02-15')
/
-- Si es campo numerico:
ALTER TABLE FID.DT_SIRBE_DETALLE_HST
TRUNCATE PARTITION FOR (20170430)
/
-- ****** ANALYTIC FUNCTIONS *******
-- *** RANK() OVER (PARTITION BY <campos_partition> ORDER BY <campos_order>).
SELECT mctaclav, mctaprod, mctafape
FROM (
SELECT mctaclav, mctaprod, mctafape,
RANK() OVER (PARTITION BY mctaprod, mctafape ORDER BY mctafuop) AS num_seq
FROM sf_mcta)
WHERE num_seq = 1;
-- *** Search and Delete duplicates
-- Delete duplicates keeping one row per group. This works even when the full
-- rows are identical, because ROWID is always unique.
delete from films
where rowid NOT in (
-- keep the first ROWID of every group; singleton groups keep their only
-- row too, so non-duplicated rows are never deleted
select min(rowid)
from films
group by title, uk_release_date
)
; -- BUG FIX: the original statement was never terminated with ';'
-- (cleanup: removed the original's HAVING COUNT(*) >= 1 — every group has at
-- least one row, so it filtered nothing)
-- More precise variant: delete the later row of each duplicated pair, chosen
-- by its surrogate key (same cost as NOT IN with MIN()).
delete from DWH_GCI.LK_CON_TARJETA
where ID_TAR_CONTRATO in (
SELECT max(ID_TAR_CONTRATO) as "ID"
FROM DWH_GCI.LK_CON_TARJETA
GROUP BY CO_TAR_CONTRATO having count(*) > 1
)
;
--****** DESARROLLO query test SCD tipo 2 ********************************************************************
truncate table SCD2Test drop storage;
drop table SCD2Test;
CREATE GLOBAL TEMPORARY TABLE SCD2Test (
id NUMBER(8,0),
period_start_date DATE,
period_end_date DATE
) ON COMMIT PRESERVE ROWS;
REM INSERTING into DSOCIAL.SCD2TEST
SET DEFINE OFF;
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (1,to_date('01/10/2018 00:00','DD/MM/YYYY HH24:MI'),to_date('31/10/2018 00:00','DD/MM/YYYY HH24:MI'));
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (1,to_date('01/11/2018 00:00','DD/MM/YYYY HH24:MI'),to_date('18/11/2018 11:15','DD/MM/YYYY HH24:MI'));
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (1,to_date('19/11/2018 11:16','DD/MM/YYYY HH24:MI'),to_date('31/12/2199 00:00','DD/MM/YYYY HH24:MI'));
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (2,to_date('01/01/2018 11:16','DD/MM/YYYY HH24:MI'),to_date('31/12/2199 00:00','DD/MM/YYYY HH24:MI'));
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (3,to_date('01/01/2018 11:16','DD/MM/YYYY HH24:MI'),to_date('31/12/2018 11:16','DD/MM/YYYY HH24:MI'));
Insert into DSOCIAL.SCD2TEST (ID,period_start_date,period_end_date) values (3,to_date('01/01/2019 11:17','DD/MM/YYYY HH24:MI'),to_date('31/12/2199 00:00','DD/MM/YYYY HH24:MI'));
select * FROM SCD2Test order by ID,period_start_date;
-- Detectar start_date o end_date que violan cronología (se permiten gaps entre fin de periodo e inicio del siguiente!!!):
-- De paso, FLAG_ACTIVO que indica el período vigente (si registro correcto, start_date_OK =1 y end_date_OK = 1)
SELECT
/*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio, period_start_date, period_end_date
, (trunc_start_date - last_end) as start_dif_days
, (next_start - trunc_end_date) as end_dif_days
, CASE WHEN ( last_end is null OR last_end >= trunc_start_date ) THEN 1 else 0 END as start_date_OK
, CASE WHEN ( next_start is null OR next_start <= trunc_end_date ) THEN 1 else 0 END as end_date_OK
, DECODE (date_rank, 1, 1, 0) as flag_activo
FROM (
select
/*expdbk, prcdbk, fmldbk*/entitat_origen, entitat_desti, subtipo_relacio
, period_start_date, period_end_date
, trunc(period_start_date) as trunc_start_date
, trunc(period_end_date) as trunc_end_date
, LAG (trunc(period_end_date)) over (partition by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio order by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio, period_start_date) as last_end
, LEAD (trunc(period_start_date)) over (partition by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio order by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio, period_start_date) as next_start
, RANK() over (partition by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio order by /*expdbk, prcdbk, fmldbk,*/entitat_origen, entitat_desti, subtipo_relacio, period_start_date desc) as date_rank
from tmp_int_relacio WHERE tipus_relacio = 6
)
ORDER BY entitat_origen, entitat_desti, subtipo_relacio, period_start_date;
-- QUERY test SCD type 2: fechas consecutivas sin gaps (si es requerido que no haya gaps...)
SELECT
id, period_start_date, period_end_date
, CASE WHEN ( last_end is null OR last_end = period_start_date - 1 ) THEN 1 else 0 END as start_date_OK
, CASE WHEN ( next_start is null OR next_start = period_end_date + 1 ) THEN 1 else 0 END as end_date_OK
-- for testing
--, last_end
--, next_start
-- we truncate dates because we do not care about time of day
FROM (
select
id
, trunc(period_start_date) as period_start_date
, trunc(period_end_date) as period_end_date
, LAG (trunc(period_end_date)) over (partition by id order by id, period_start_date) as last_end
, LEAD (trunc(period_start_date)) over (partition by id order by id, period_start_date) as next_start
from SCD2Test
)
ORDER BY id, period_start_date;
--************** fin DESARROLLO query test SCD tipo 2 *************************************************
select * from (select count( distinct cell_name) from v$cell_state);
select case when count(cell_name) > 0 then 'EXADATA'
else 'NOT EXADATA' END "IsExadata"
from v$cell_state;
-- Execute with the user which owns the table:
/*
select *
from user_segments
where segment_name = 'table_name';
*/
col segment_name format a20
select segment_name
, bytes SIZE_BYTES
, ceil(bytes / 1024 / 1024) SIZE_MB
from user_segments
where segment_name like '&obj_name'
/
-- ******* Time Functions *******
SELECT SESSIONTIMEZONE, CURRENT_DATE FROM DUAL;
SELECT SESSIONTIMEZONE, CURRENT_TIMESTAMP FROM DUAL;
-- Logamos tiempo:
-- Measure elapsed time of a PL/SQL section with DBMS_UTILITY.GET_TIME,
-- which returns hundredths of a second.
DECLARE
Tini pls_integer;
Tfin pls_integer;
BEGIN
Tini := dbms_utility.get_time;
-- ... the work to be timed goes here ...
Tfin := dbms_utility.get_time;
DBMS_OUTPUT.put_line( (Tfin-Tini)/100 /60 || ' minutos.'); -- /100 -> seconds, /60 -> minutes
END;
/
-- ========== TIMESTAMP ==========
-- Dif en segundos
EXTRACT(DAY FROM(updated - created) ) * 24 * 60 * 60
+ EXTRACT(HOUR FROM(updated - created) ) * 60 * 60
+ EXTRACT(MINUTE FROM(updated - created) ) * 60
+ EXTRACT(SECOND FROM(updated - created) ) AS calc_extract
-- para trabajar sólo con la hora:
, TO_CHAR(lpisteplog.START_DATE, 'HH24:MI') as START_TIME_CH, TO_CHAR(lpisteplog.END_DATE, 'HH24:MI') as END_TIME_CH
-- para obtener el average de cierta hora del día (sin tener el cuenta el día en que ocurre):
, to_char(to_date(round(AVG((START_DATE-TRUNC(START_DATE))*24*60*60)),'sssss'),'hh24:mi:ss') AS AVG_START_DATE
SELECT
created
, updated
, EXTRACT(DAY FROM(updated - created) ) day
, EXTRACT(HOUR FROM(updated - created) ) hour
, EXTRACT(MINUTE FROM(updated - created) ) minute
, EXTRACT(SECOND FROM(updated - created) ) seconds
, EXTRACT(DAY FROM(updated - created) ) * 24 * 60 * 60
+ EXTRACT(HOUR FROM(updated - created) ) * 60 * 60
+ EXTRACT(MINUTE FROM(updated - created) ) * 60
+ EXTRACT(SECOND FROM(updated - created) ) AS calc_extract
, round((cast(updated as date) - cast(created as date)) * 24 * 60 * 60) calc_date
, EXTRACT(DAY FROM(updated - created))* 24 * 60 * 60 extract_day_in_secs
, EXTRACT(HOUR FROM(updated - created)) * 60 * 60 extract_hour_in_secs
, EXTRACT(MINUTE FROM(updated - created))* 60 minute_in_secs
FROM
(select
TO_TIMESTAMP('2000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS') as CREATED
,TO_TIMESTAMP('2001-02-03 04:05:06', 'YYYY-MM-DD HH24:MI:SS') as UPDATED
FROM DUAL);
-- CAREFUL!!! Never use the following trick in production (except for a quick one-off):
-- it multiplies the TIMESTAMP interval by 86400, which overflows after roughly
-- 30 years within the 11-byte TIMESTAMP. It is a very ugly hack.
SELECT
created
, updated
-- lo correcto, optimo
--, EXTRACT(DAY FROM(updated - created) ) * 24 * 60 * 60
+ EXTRACT(HOUR FROM(updated - created) ) * 60 * 60
+ EXTRACT(MINUTE FROM(updated - created) ) * 60
+ EXTRACT(SECOND FROM(updated - created) ) AS calc_extract
-- truco feo que no aguanta un intervalo de mas de 30 años
, EXTRACT(DAY FROM( (updated - created)*24*60*60) ) as nasty_TRICK
FROM
(select
TO_TIMESTAMP('2000-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS') as CREATED
,TO_TIMESTAMP('2040-02-03 04:05:06', 'YYYY-MM-DD HH24:MI:SS') as UPDATED
FROM DUAL);
------ DATES ------------
-- EXTRACT(YEAR FROM date_field) is faster than TO_CHAR(date_field,'yyyy' ) !!!
-- (and clearer) but return NUMBER, not CHAR.
-- Para obtener listado de dias
select trunc(sysdate,'MM') - numtodsinterval(rownum - 1, 'day') my_month
from dual connect by level <= 15;
/* Lanzado un 11/02/2020:
01/02/2020 00:00:00
31/01/2020 00:00:00
30/01/2020 00:00:00
...
*/
-- LAG to fix
-- Tenemos una tabla en la que por error las fecha alta (DAL) han sido truncadas en origen.
-- Para evitar duplicidades cuando hay una baja y el alta siguiente en un mismo día, informamos la DAL de esos casos
-- con la fecha de baja del registro anterior.
select PAC_NIA, ATH_TUT, ATH_DAL
-- cuando hay una baja y el alta siguiente en un mismo día:
, CASE WHEN TRUNC(ATH_DAL)=TRUNC(LAG(ATH_DAB) OVER (PARTITION BY PAC_NIA, ATH_TUT ORDER BY ATH_DAB asc))
-- usamos como DAL la última DAB (de ese mismo día)
THEN LAG(ATH_DAB) OVER (PARTITION BY PAC_NIA, ATH_TUT ORDER BY ATH_DAB asc)
-- en el resto de casos
ELSE ATH_DAL
END as DAL_LAG
, ATH_DAB
from MACTBATH_STG
order by PAC_NIA, ATH_TUT, ATH_DAL, ATH_DAB;
select PAC_NIA, ATA_TUT, ATA_DAL
, LEAD( ATA_DAL,1, TO_DATE('#GLOBAL.DATA_ND_99991231','yyyyMMdd') ) OVER (PARTITION BY PAC_NIA, ATA_TUT ORDER BY ATA_DAL asc) as DTU_DAB
from MACTBATA_STG
where (PAC_NIA, ATA_TUT) in (
select PAC_NIA, ATA_TUT from MACTBATA_STG
group by PAC_NIA, ATA_TUT having count(*)>1
)
order by PAC_NIA, ATA_TUT, ATA_DAL;
set serveroutput on size unlimited
set timing on
-- Archive rows from TABLEA into TABLEA_AUDIT and delete them from the source,
-- using BULK COLLECT + FORALL.
declare
-- collection typed on the source table so fetched rows can be re-inserted as-is
type audit_type is table of TABLEA%rowtype;
v_type audit_type;
CURSOR temp_cur is
select *
FROM TABLEA a
WHERE COLUMN1 IS NOT NULL
-- BUG FIX: ANSI date literal instead of '01-MAR-18', which depended on the
-- session's NLS_DATE_FORMAT/NLS_DATE_LANGUAGE
AND TRUNC(UPDATED_DT) < DATE '2018-03-01';
BEGIN
OPEN temp_cur;
-- collect data in the collection
-- NOTE(review): an unbounded BULK COLLECT loads the whole result set into PGA
-- memory at once; add a LIMIT clause for large tables.
-- BUG FIX: the original used "/ ... /" here, which is not valid comment syntax.
FETCH temp_cur BULK COLLECT INTO v_type;
-- close the pointer
CLOSE temp_cur;
-- BUG FIX: guard against an empty fetch — FORALL over NULL bounds raises an error
IF v_type.COUNT > 0 THEN
-- archive the rows, then delete them from the source table
FORALL i in v_type.first .. v_type.last
INSERT INTO TABLEA_AUDIT VALUES v_type(i);
FORALL i in v_type.first .. v_type.last
DELETE FROM TABLEA WHERE PRIMARY_KEY_COL = v_type(i).PRIMARY_KEY_COL;
END IF;
COMMIT;
END;
/
-- https://blogs.oracle.com/oraclemagazine/bulk-processing-with-bulk-collect-and-forall
-- select min(IDGSTUDY) from BISLT_INT_H.ft_simd_study where TD_TANCAMENT_STUDY is null; -- -1640249170562907
-- select max(IDGSTUDY) from BISLT_INT_H.ft_simd_study where TD_TANCAMENT_STUDY is null; -- 214905959358943805
-- select IDGSTUDY, TD_TANCAMENT_STUDY FROM BISLT_INT_H.ft_simd_study;
spool off
spool update_forall_TANCAMENT_STUDY.log
-- so that DBMS_OUTPUT is written to the spooled file
SET SERVEROUTPUT ON;
-- Backfill TD_TANCAMENT_STUDY (elapsed seconds between STUDYDATETIME and
-- DATECLOSED) in chunks of c_limit rows, committing after each chunk.
DECLARE
Tini pls_integer;
Tfin pls_integer;
v_rows pls_integer := 0; -- total number of rows updated
c_limit PLS_INTEGER := 100; -- usually the best value DB-wide even if it looks small. Never go above 1000.
CURSOR c_IDGSTUDY IS
SELECT IDGSTUDY
-- elapsed time in seconds, decomposed from the timestamp interval
, extract(day from ( DATECLOSED - STUDYDATETIME ))*24*60*60
+ extract(hour from (DATECLOSED - STUDYDATETIME))*60*60
+ extract(minute from (DATECLOSED - STUDYDATETIME))*60
+ extract(second from (DATECLOSED - STUDYDATETIME)) calc_TANCAMENT_STUDY
FROM BISLT_INT_H.ft_simd_study
WHERE TD_TANCAMENT_STUDY is null
;
-- If we wanted the whole row:
-- type row_type is table of ft_simd_study%rowtype;
TYPE id_tancament_rt IS RECORD
(
IDGSTUDY FT_SIMD_STUDY.IDGSTUDY%TYPE,
calc_TANCAMENT_STUDY FT_SIMD_STUDY.TD_TANCAMENT_STUDY%TYPE
);
TYPE study_tancament_t IS TABLE OF id_tancament_rt
INDEX BY BINARY_INTEGER;
l_study_tancament study_tancament_t;
BEGIN
DBMS_OUTPUT.ENABLE(1000000);
Tini := dbms_utility.get_time;
OPEN c_IDGSTUDY;
LOOP
FETCH c_IDGSTUDY
BULK COLLECT INTO l_study_tancament
LIMIT c_limit;
EXIT WHEN l_study_tancament.COUNT = 0;
FORALL indx IN 1 .. l_study_tancament.COUNT
UPDATE BISLT_INT_H.ft_simd_study stu
SET stu.TD_TANCAMENT_STUDY = l_study_tancament(indx).calc_TANCAMENT_STUDY
WHERE stu.TD_TANCAMENT_STUDY is null
and stu.IDGSTUDY = l_study_tancament(indx).IDGSTUDY;
v_rows := v_rows + SQL%ROWCOUNT; -- accumulate rows updated by this FORALL
COMMIT;
-- only enable this if there will be very few loops!!!
-- dbms_output.put_line( 'Looping, rows so far = ' || v_rows );
END LOOP;
CLOSE c_IDGSTUDY; -- BUG FIX: the original never closed the cursor
Tfin := dbms_utility.get_time; -- BUG FIX: the original assigned Tini again here, so Tfin stayed NULL
DBMS_OUTPUT.PUT_LINE('Update of BISLT_INT_H.ft_simd_study completed.');
DBMS_OUTPUT.put_line( (Tfin-Tini)/100 /60 || ' minutos.');
dbms_output.put_line('Nb of updated rows = ' || v_rows); -- BUG FIX: the original referenced c%rowcount, but no cursor "c" exists
EXCEPTION
WHEN OTHERS THEN
dbms_output.put_line('Error code:' || SQLCODE);
dbms_output.put_line('Error message:' || sqlerrm);
RAISE;
END;
/
spool off
-- select IDGSTUDY, TD_TANCAMENT_STUDY FROM BISLT_INT_H.ft_simd_study where TD_TANCAMENT_STUDY is null;
-- ********************************************
-- per una sola columna:
-- ********************************************
-- Raise salaries for one department in chunks of c_limit employees, using
-- BULK COLLECT + FORALL ... SAVE EXCEPTIONS so per-row errors are reported
-- instead of aborting the whole batch.
DECLARE
c_limit PLS_INTEGER := 100;
-- BUG FIX: these two were referenced but never declared (they were copied
-- from a procedure's parameter list); an anonymous block must declare them.
department_id_in employees.department_id%TYPE := 50;
increase_pct_in NUMBER := 0.05; -- fractional raise, e.g. 0.05 = +5%
CURSOR employees_cur
IS
SELECT employee_id
FROM employees
WHERE department_id = department_id_in;
TYPE employee_ids_t IS TABLE OF employees.employee_id%TYPE;
l_employee_ids employee_ids_t;
BEGIN
OPEN employees_cur;
LOOP
FETCH employees_cur
BULK COLLECT INTO l_employee_ids
LIMIT c_limit; -- every iteration fetches at most 100 ids
EXIT WHEN l_employee_ids.COUNT = 0;
FORALL indx IN 1 .. l_employee_ids.COUNT SAVE EXCEPTIONS /* diverts per-row errors to SQL%BULK_EXCEPTIONS instead of stopping */
UPDATE employees emp -- updating up to 100 records in one round trip
SET emp.salary =
emp.salary + emp.salary * increase_pct_in
WHERE emp.employee_id = l_employee_ids(indx);
commit;
END LOOP;
CLOSE employees_cur; -- BUG FIX: the original never closed the cursor
EXCEPTION
WHEN OTHERS
THEN
IF SQLCODE = -24381 -- ORA-24381: error(s) in array DML (raised by SAVE EXCEPTIONS)
THEN
FOR indx IN 1 .. SQL%BULK_EXCEPTIONS.COUNT
LOOP
-- Capturing the errors that occurred during the update
DBMS_OUTPUT.put_line (
SQL%BULK_EXCEPTIONS (indx).ERROR_INDEX
|| ': ' -- BUG FIX: the original used typographic quotes (‘), which do not compile
|| SQL%BULK_EXCEPTIONS (indx).ERROR_CODE);
--<You can insert the error records to a table here>
END LOOP;
ELSE
RAISE;
END IF;
END;
/
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment