new test for data format INSERT, some fixes
parent 1d270383ae
commit b200e4e604

@@ -175,7 +175,7 @@ EXAMPLE ZIP FILE SQL*Plus
 -- Example Windows: certutil -decode app_100.zip.base64 app_100.zip
 -- Example Mac: base64 -D -i app_100.zip.base64 -o app_100.zip
 -- Example Linux: base64 -d app_100.zip.base64 > app_100.zip
-set verify off feedback off heading off termout off
+set verify off feedback off heading off
 set trimout on trimspool on pagesize 0 linesize 5000 long 100000000 longchunksize 32767
 whenever sqlerror exit sql.sqlcode rollback
 variable contents clob
@@ -188,9 +188,11 @@ BEGIN
 p_include_templates => true)));
 END;
 /
+set termout off
 spool "app_100.zip.base64"
 print contents
 spool off
+set termout on
 ```

 SIGNATURE

@@ -12,7 +12,8 @@
 "postbuild": "echo exit | sqlplus -S /@playground @plex_install.sql",
 "watch": "chokidar src/PLEX.pks src/PLEX.pkb src/plex_install.sql --initial -c \"npm run build\"",
 "test-ccflags": "cd src && echo exit | sqlplus -S /@playground @plex_test_ccflags.sql && cd ..",
-"test-export": "cd src && echo exit | sqlplus -S /@playground @plex_test_export.sql && cd .."
+"test-export": "cd src && echo exit | sqlplus -S /@playground @plex_test_export.sql && cd ..",
+"test-types": "cd src && echo exit | sqlplus -S /@playground @plex_test_types.sql && cd .."
 },
 "devDependencies": {
 "chokidar-cli": "^2.1.0",

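The new test-types script is a thin npm wrapper around SQL*Plus. A minimal sketch of running the same test by hand, assuming the /@playground connect identifier used by the other scripts (a configured wallet or stored credential):

```
-- in a SQL*Plus session started with /nolog from the src directory
connect /@playground
@plex_test_types.sql
exit
```
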
@@ -282,7 +282,7 @@ EXAMPLE ZIP FILE SQL*Plus
 -- Example Windows: certutil -decode app_100.zip.base64 app_100.zip
 -- Example Mac: base64 -D -i app_100.zip.base64 -o app_100.zip
 -- Example Linux: base64 -d app_100.zip.base64 > app_100.zip
-set verify off feedback off heading off termout off
+set verify off feedback off heading off
 set trimout on trimspool on pagesize 0 linesize 5000 long 100000000 longchunksize 32767
 whenever sqlerror exit sql.sqlcode rollback
 variable contents clob
@@ -295,9 +295,11 @@ BEGIN
 p_include_templates => true)));
 END;
 {{/}}
+set termout off
 spool "app_100.zip.base64"
 print contents
 spool off
+set termout on
 ```
 **/

@@ -844,6 +846,7 @@ PROCEDURE util_clob_query_to_csv (

 PROCEDURE util_clob_table_to_insert (
 p_table_name IN VARCHAR2,
+p_data_scn IN NUMBER,
 p_max_rows IN NUMBER DEFAULT 1000);

 PROCEDURE util_clob_create_runtime_log (p_export_files IN OUT NOCOPY tab_export_files);

@@ -1567,6 +1570,7 @@ END util_clob_query_to_csv;

 PROCEDURE util_clob_table_to_insert (
 p_table_name IN VARCHAR2,
+p_data_scn IN NUMBER,
 p_max_rows IN NUMBER DEFAULT 1000)
 IS
 v_cursor PLS_INTEGER;
@@ -1654,7 +1658,7 @@ BEGIN
 v_cursor := dbms_sql.open_cursor;
 dbms_sql.parse(
 v_cursor,
-'select * from ' || p_table_name || ' order by ' || get_order_by_list,
+'select * from ' || p_table_name || ' as of scn ' || p_data_scn || ' order by ' || get_order_by_list,
 dbms_sql.native);
 -- https://support.esri.com/en/technical-article/000010110
 -- http://bluefrog-oracle.blogspot.com/2011/11/describing-ref-cursor-using-dbmssql-api.html

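The parse change above turns the per-table query into a flashback query, so every table in one backapp run is read as of the same SCN and the generated data scripts are consistent with each other. A minimal sketch of the underlying technique, using the test table this commit creates; depending on the schema it may additionally need the FLASHBACK object privilege and EXECUTE on DBMS_FLASHBACK:

```
declare
  v_data_scn number;
  v_count    pls_integer;
begin
  -- capture one SCN up front ...
  v_data_scn := dbms_flashback.get_system_change_number;
  -- ... and read the table exactly as it was at that SCN
  execute immediate
    'select count(*) from plex_test_multiple_datatypes as of scn :scn'
    into v_count
    using v_data_scn;
  dbms_output.put_line('rows at scn ' || v_data_scn || ': ' || v_count);
end;
/
```

If the undo needed for the captured SCN has already been recycled, such a query fails with ORA-01555, which is worth keeping in mind for long-running exports.
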
@@ -1705,6 +1709,7 @@ BEGIN
 util_clob_append(v_buffer_varchar2);
 ELSE
 v_buffer_varchar2 := 'CLOB value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type = c_xmltype THEN
@@ -1716,6 +1721,7 @@ BEGIN
 util_clob_append(v_buffer_varchar2);
 ELSE
 v_buffer_varchar2 := 'XML value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type = c_long THEN
@@ -1724,10 +1730,14 @@ BEGIN
 prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 ELSE
-util_clob_append('LONG value skipped - larger then ' || c_vc2_max_size || ' characters');
+v_buffer_varchar2 := 'LONG value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
+util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type IN (c_raw, c_long_raw, c_blob, c_bfile) THEN
-util_clob_append('Binary data type skipped - currently not supported');
+v_buffer_varchar2 := 'Binary data type skipped - currently not supported';
+prepare_varchar2_buffer_for_scripting;
+util_clob_append(v_buffer_varchar2);
 ELSE
 dbms_sql.column_value(v_cursor, i, v_buffer_varchar2);
 prepare_varchar2_buffer_for_scripting;

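With this change all skipped-value branches set the buffer and run it through prepare_varchar2_buffer_for_scripting before appending, the same path that regular column values take. The helper itself is not part of this diff; purely as an illustration (not PLEX's actual implementation), scripting a fetched VARCHAR2 into a generated INSERT needs at least NULL handling and quote doubling, roughly like this hypothetical function:

```
create or replace function to_sql_literal (p_value in varchar2) return varchar2 is
begin
  if p_value is null then
    return 'NULL';
  end if;
  -- double embedded single quotes so the value survives as a SQL literal
  return '''' || replace(p_value, '''', '''''') || '''';
end to_sql_literal;
/
```

Dates, timestamps and numbers additionally need fixed format masks (to_char with an explicit format) so the generated script loads the same values regardless of NLS settings.
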
@@ -2696,7 +2706,10 @@ SELECT table_name,
 BEGIN
 v_file_path := p_base_path_data || '/' || v_rec.table_name || '.sql';
 util_log_start(v_file_path);
-util_clob_table_to_insert(p_table_name => v_rec.table_name);
+util_clob_table_to_insert(
+p_table_name => v_rec.table_name,
+p_data_scn => v_data_scn,
+p_max_rows => p_data_max_rows);
 util_clob_add_to_export_files(
 p_export_files => v_export_files,
 p_name => v_file_path);

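Seen from the caller, the data export now passes one SCN captured per backapp run plus an upper row limit for every table. A condensed sketch of such a call; p_app_id, p_include_data and p_data_format appear verbatim in the new test script below, while p_data_max_rows is only inferred from the hunk above and may differ in name or default:

```
variable zip clob
begin
  :zip := plex.to_base64(plex.to_zip(plex.backapp(
    p_app_id        => 100,
    p_include_data  => true,
    p_data_format   => 'csv,insert',
    p_data_max_rows => 100)));
end;
/
```
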
19 src/PLEX.pkb

@@ -205,6 +205,7 @@ PROCEDURE util_clob_query_to_csv (

 PROCEDURE util_clob_table_to_insert (
 p_table_name IN VARCHAR2,
+p_data_scn IN NUMBER,
 p_max_rows IN NUMBER DEFAULT 1000);

 PROCEDURE util_clob_create_runtime_log (p_export_files IN OUT NOCOPY tab_export_files);

@@ -928,6 +929,7 @@ END util_clob_query_to_csv;

 PROCEDURE util_clob_table_to_insert (
 p_table_name IN VARCHAR2,
+p_data_scn IN NUMBER,
 p_max_rows IN NUMBER DEFAULT 1000)
 IS
 v_cursor PLS_INTEGER;
@@ -1015,7 +1017,7 @@ BEGIN
 v_cursor := dbms_sql.open_cursor;
 dbms_sql.parse(
 v_cursor,
-'select * from ' || p_table_name || ' order by ' || get_order_by_list,
+'select * from ' || p_table_name || ' as of scn ' || p_data_scn || ' order by ' || get_order_by_list,
 dbms_sql.native);
 -- https://support.esri.com/en/technical-article/000010110
 -- http://bluefrog-oracle.blogspot.com/2011/11/describing-ref-cursor-using-dbmssql-api.html

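The two links kept as comments describe the classic DBMS_SQL cycle this procedure is built on: parse, describe the columns, define a buffer per column, execute, then fetch and read every value. A self-contained sketch of that cycle, using a dictionary query whose columns are all VARCHAR2 so one buffer type is enough:

```
declare
  v_cursor   pls_integer := dbms_sql.open_cursor;
  v_ignore   pls_integer;
  v_col_cnt  pls_integer;
  v_desc_tab dbms_sql.desc_tab3;
  v_value    varchar2(32767);
begin
  dbms_sql.parse(v_cursor, 'select table_name, tablespace_name from user_tables', dbms_sql.native);
  dbms_sql.describe_columns3(v_cursor, v_col_cnt, v_desc_tab);
  for i in 1 .. v_col_cnt loop
    dbms_sql.define_column(v_cursor, i, v_value, 32767);
  end loop;
  v_ignore := dbms_sql.execute(v_cursor);
  while dbms_sql.fetch_rows(v_cursor) > 0 loop
    for i in 1 .. v_col_cnt loop
      dbms_sql.column_value(v_cursor, i, v_value);
      dbms_output.put_line(v_desc_tab(i).col_name || ' = ' || v_value);
    end loop;
  end loop;
  dbms_sql.close_cursor(v_cursor);
end;
/
```
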
@@ -1066,6 +1068,7 @@ BEGIN
 util_clob_append(v_buffer_varchar2);
 ELSE
 v_buffer_varchar2 := 'CLOB value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type = c_xmltype THEN
@@ -1077,6 +1080,7 @@ BEGIN
 util_clob_append(v_buffer_varchar2);
 ELSE
 v_buffer_varchar2 := 'XML value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type = c_long THEN
@@ -1085,10 +1089,14 @@ BEGIN
 prepare_varchar2_buffer_for_scripting;
 util_clob_append(v_buffer_varchar2);
 ELSE
-util_clob_append('LONG value skipped - larger then ' || c_vc2_max_size || ' characters');
+v_buffer_varchar2 := 'LONG value skipped - larger then ' || c_vc2_max_size || ' characters';
+prepare_varchar2_buffer_for_scripting;
+util_clob_append(v_buffer_varchar2);
 END IF;
 ELSIF v_desc_tab(i).col_type IN (c_raw, c_long_raw, c_blob, c_bfile) THEN
-util_clob_append('Binary data type skipped - currently not supported');
+v_buffer_varchar2 := 'Binary data type skipped - currently not supported';
+prepare_varchar2_buffer_for_scripting;
+util_clob_append(v_buffer_varchar2);
 ELSE
 dbms_sql.column_value(v_cursor, i, v_buffer_varchar2);
 prepare_varchar2_buffer_for_scripting;

@@ -2057,7 +2065,10 @@ SELECT table_name,
 BEGIN
 v_file_path := p_base_path_data || '/' || v_rec.table_name || '.sql';
 util_log_start(v_file_path);
-util_clob_table_to_insert(p_table_name => v_rec.table_name);
+util_clob_table_to_insert(
+p_table_name => v_rec.table_name,
+p_data_scn => v_data_scn,
+p_max_rows => p_data_max_rows);
 util_clob_add_to_export_files(
 p_export_files => v_export_files,
 p_name => v_file_path);

@@ -235,7 +235,7 @@ EXAMPLE ZIP FILE SQL*Plus
 -- Example Windows: certutil -decode app_100.zip.base64 app_100.zip
 -- Example Mac: base64 -D -i app_100.zip.base64 -o app_100.zip
 -- Example Linux: base64 -d app_100.zip.base64 > app_100.zip
-set verify off feedback off heading off termout off
+set verify off feedback off heading off
 set trimout on trimspool on pagesize 0 linesize 5000 long 100000000 longchunksize 32767
 whenever sqlerror exit sql.sqlcode rollback
 variable contents clob
@@ -248,9 +248,11 @@ BEGIN
 p_include_templates => true)));
 END;
 {{/}}
+set termout off
 spool "app_100.zip.base64"
 print contents
 spool off
+set termout on
 ```
 **/

122 src/plex_test_types.sql Normal file

@@ -0,0 +1,122 @@
+timing start test_export
+set verify off feedback off heading off timing on
+set trimout on trimspool on pagesize 0 linesize 5000 long 100000000 longchunksize 32767
+whenever sqlerror exit sql.sqlcode rollback
+whenever oserror continue
+variable zip clob
+
+prompt
+prompt PLEX Test Export Format INSERT With Multiple Data Types
+prompt =======================================================
+
+prompt Drop existing test objects
+begin
+for i in (
+select object_type, object_name
+from user_objects
+where object_type = 'TABLE' and object_name = 'PLEX_TEST_MULTIPLE_DATATYPES'
+or object_type = 'PACKAGE' and object_name = 'PLEX_TEST_MULTIPLE_DATATYPES_API')
+loop
+execute immediate 'drop ' || i.object_type || ' ' || i.object_name;
+end loop;
+end;
+/
+
+prompt Create table plex_test_multiple_datatypes
+begin
+for i in (
+select 'PLEX_TEST_MULTIPLE_DATATYPES' from dual
+minus
+select object_name from user_objects)
+loop
+execute immediate q'[
+create table plex_test_multiple_datatypes (
+ptmd_id integer generated always as identity,
+ptmd_varchar varchar2(15 char) ,
+ptmd_char char(1 char) not null ,
+ptmd_integer integer ,
+ptmd_number number ,
+ptmd_number_x_5 number(*,5) ,
+ptmd_number_20_5 number(20,5) ,
+ptmd_float float ,
+ptmd_float_size_30 float(30) ,
+ptmd_xmltype xmltype ,
+ptmd_clob clob ,
+ptmd_blob blob ,
+ptmd_date date ,
+ptmd_timestamp timestamp ,
+ptmd_timestamp_tz timestamp with time zone ,
+ptmd_timestamp_ltz timestamp with local time zone ,
+--
+primary key (ptmd_id),
+unique (ptmd_varchar)
+)
+]';
+end loop;
+end;
+/
+
+prompt Create table API for plex_test_multiple_datatypes
+begin
+for i in (
+select 'PLEX_TEST_MULTIPLE_DATATYPES_API' from dual
+minus
+select object_name from user_objects)
+loop
+om_tapigen.compile_api(
+p_table_name => 'PLEX_TEST_MULTIPLE_DATATYPES',
+p_enable_custom_defaults => true);
+end loop;
+end;
+/
+
+prompt Insert 100 rows into plex_test_multiple_datatypes
+declare
+l_rows_tab plex_test_multiple_datatypes_api.t_rows_tab;
+l_number_records pls_integer := 1;
+begin
+l_rows_tab := plex_test_multiple_datatypes_api.t_rows_tab();
+l_rows_tab.extend(l_number_records);
+for i in 1 .. l_number_records loop
+l_rows_tab(i) := plex_test_multiple_datatypes_api.get_a_row;
+end loop;
+plex_test_multiple_datatypes_api.create_rows(l_rows_tab);
+commit;
+end;
+/
+
+prompt Run plex.backapp (this can take some time...)
+BEGIN
+:zip := plex.to_base64(plex.to_zip(plex.backapp(
+p_app_id => 100,
+p_include_object_ddl => true,
+p_include_ords_modules => true,
+p_include_data => true,
+p_data_format => 'csv,insert',
+p_include_templates => true)));
+END;
+/
+
+set termout off
+spool "app_100.zip.base64"
+print zip
+spool off
+set termout on
+
+prompt Extract zip on host operating system:
+prompt Try Windows: certutil -decode app_100.zip.base64 app_100.zip
+host certutil -decode app_100.zip.base64 app_100.zip
+prompt Try Mac: base64 -D -i app_100.zip.base64 -o app_100.zip
+host base64 -D -i app_100.zip.base64 -o app_100.zip
+prompt Try Linux: base64 -d app_100.zip.base64 > app_100.zip
+host base64 -d app_100.zip.base64 > app_100.zip
+
+prompt Delete base64 encoded file:
+prompt Windows, Mac, Linux: del app_100.zip.base64
+host del app_100.zip.base64
+
+timing stop
+prompt =======================================================
+prompt Done :-)
+prompt

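The test leaves its objects in place so the exported files can be compared against the live data afterwards. To reset the playground schema, the created objects can be dropped again (same names as in the script above):

```
drop table plex_test_multiple_datatypes purge;
drop package plex_test_multiple_datatypes_api;
```
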