Trigger insert on 2nd table with raise_application_error?

Hi,
Is it possible to use raise_application_error so that the insert fails on the table the insert trigger is defined on, yet still allow an INSERT on a second table?
Here is the trigger.
CREATE OR REPLACE TRIGGER "SYSADM"."TBI_EARLY_RECEIPT_WARNING"
BEFORE INSERT
ON "RECEIVER_LINE" REFERENCING OLD AS OLDDATA NEW AS NEWDATA
FOR EACH ROW
DECLARE mDate SYSADM.PURC_ORDER_LINE.PROMISE_DATE%type;
BEGIN
SELECT
SYSADM.PURC_ORDER_LINE.PROMISE_DATE
INTO mDate
FROM SYSADM.PURC_ORDER_LINE
WHERE :NEWDATA.PURC_ORDER_LINE_NO = SYSADM.PURC_ORDER_LINE.LINE_NO
AND :NEWDATA.PURC_ORDER_ID = SYSADM.PURC_ORDER_LINE.PURC_ORDER_ID;
IF mDate > (SYSDATE + 30) THEN
          INSERT INTO BARCODERECEIVER_LINE
          (RECEIVER_ID, LINE_NO, PRINTED, QTY, PO, VENDOR, DESCRIPTION, PART, RECEIVED_DATE, PRINTER, EARLY, TRANSTIME)
          SELECT
     :NEWDATA.RECEIVER_ID,
     :NEWDATA.LINE_NO,
     'N',
     :NEWDATA.RECEIVED_QTY,
     :NEWDATA.PURC_ORDER_ID,
     SYSADM.VENDOR.NAME,
     SYSADM.PART.DESCRIPTION,
     NVL(SYSADM.PURC_ORDER_LINE.PART_ID, SYSADM.PURC_ORDER_LINE.VENDOR_PART_ID),
     SYSADM.RECEIVER.RECEIVED_DATE,
     DECODE(SYSADM.RECEIVER.USER_ID, 'CARL', 1, 'ORLANDO', 2, 'TONYA', 3, 'MATT', 'TOM', 3, 'SYSADM', 3),
'Y',
to_char(sysdate, 'Dy DD-Mon-YYYY HH24:MI:SS')
     FROM SYSADM.RECEIVER, SYSADM.PURC_ORDER_LINE, SYSADM.PART, SYSADM.PURCHASE_ORDER, SYSADM.VENDOR
     WHERE :NEWDATA.RECEIVER_ID = SYSADM.RECEIVER.ID
     AND :NEWDATA.PURC_ORDER_LINE_NO = SYSADM.PURC_ORDER_LINE.LINE_NO
     AND :NEWDATA.PURC_ORDER_ID = SYSADM.PURC_ORDER_LINE.PURC_ORDER_ID
     AND SYSADM.PURC_ORDER_LINE.PART_ID = SYSADM.PART.ID (+)
     AND SYSADM.RECEIVER.PURC_ORDER_ID = SYSADM.PURCHASE_ORDER.ID (+)
     AND SYSADM.PURCHASE_ORDER.VENDOR_ID = SYSADM.VENDOR.ID (+)
     AND SYSADM.RECEIVER.USER_ID IN ('ORLANDO', 'TONYA', 'MATT', 'TOM', 'SYSADM');
raise_application_error(-20000, 'Check with Purchasing please Exceeds Early Need Date');
END IF;
END;
If you comment out the raise_application_error call, the insert into the second table works perfectly. Now we need to prevent the insert on the main table as well: the insert into BARCODERECEIVER_LINE should succeed, but the insert on the RECEIVER_LINE table should fail. Is there any work-around for this? Many thanks for any reply.
Steve.

Hello
Here's an example of what you are trying to achieve (I think). I've not put any validation code in or anything, but hopefully it demonstrates the principle...
CREATE TABLE test_audit(PURC_ORDER_ID varchar2(20),
                     PURC_ORDER_LINE_NO NUMBER,
                     RECEIVER_ID varchar2(20),
                     LINE_NO varchar2(20),
                     PRINTED varchar2(20),
                     RECEIVED_QTY NUMBER,
                     PO varchar2(20),
                     VENDOR varchar2(20),
                     DESCRIPTION varchar2(20),
                     RECEIVED_DATE DATE) ;
CREATE OR REPLACE PROCEDURE p_audit_insert(v_PURC_ORDER_ID      IN VARCHAR2,
                                           v_PURC_ORDER_LINE_NO IN NUMBER,
                                           v_RECEIVER_ID        IN VARCHAR2,
                                           v_LINE_NO            IN VARCHAR2,
                                           v_PRINTED            IN VARCHAR2,
                                           v_RECEIVED_QTY       IN NUMBER,
                                           v_PO                 IN VARCHAR2,
                                           v_VENDOR             IN VARCHAR2,
                                           v_DESCRIPTION        IN VARCHAR2,
                                           v_RECEIVED_DATE      IN DATE)
   IS
      --Could be replaced with a function call to get the pipe name
      lv_PipeName     VARCHAR2(30) := 'Test_Audit_Pipe';
      ln_Status       NUMBER;
   BEGIN
      -- DBMS_PIPE messages are not transactional, so they survive the rollback
      -- caused by raise_application_error in the calling trigger
      DBMS_PIPE.pack_message(v_PURC_ORDER_ID);
      DBMS_PIPE.pack_message(v_PURC_ORDER_LINE_NO);
      DBMS_PIPE.pack_message(v_RECEIVER_ID);
      DBMS_PIPE.pack_message(v_LINE_NO);
      DBMS_PIPE.pack_message(v_PRINTED);
      DBMS_PIPE.pack_message(v_RECEIVED_QTY);
      DBMS_PIPE.pack_message(v_PO);
      DBMS_PIPE.pack_message(v_VENDOR);
      DBMS_PIPE.pack_message(v_DESCRIPTION);
      DBMS_PIPE.pack_message(v_RECEIVED_DATE);
      ln_Status := DBMS_PIPE.send_message(lv_PipeName);
      dbms_output.put_line(ln_Status);
   END;
CREATE TABLE test_table(text       varchar2(20));
CREATE OR REPLACE TRIGGER test_table_trigger BEFORE INSERT ON test_table
FOR EACH ROW
BEGIN
   IF :NEW.text = 'Create audit row' THEN
           p_audit_insert( 'Purch order',
                           123,
                           'Receiver id',
                           'Line no',
                           'Printed',
                           456,
                           'PO number',
                           'Vendor',
                           :NEW.text,
                           SYSDATE);
           -- This rolls back the insert into test_table, but the pipe message survives
           RAISE_APPLICATION_ERROR(-20001,'Audit row generated');
   END IF;
END;
CREATE OR REPLACE PROCEDURE p_Read_Audit_Pipe
IS
   lv_PipeName             VARCHAR2(30) := 'Test_Audit_Pipe';
   ln_Status               NUMBER;
   lv_PURC_ORDER_ID        VARCHAR2(20);
   lv_PURC_ORDER_LINE_NO   NUMBER;
   lv_RECEIVER_ID          VARCHAR2(20);
   lv_LINE_NO              VARCHAR2(20);
   lv_PRINTED              VARCHAR2(20);
   lv_RECEIVED_QTY         NUMBER;
   lv_PO                   VARCHAR2(20);
   lv_VENDOR               VARCHAR2(20);
   lv_DESCRIPTION          VARCHAR2(20);
   lv_RECEIVED_DATE        DATE;
BEGIN
   -- Blocks until a message arrives (an optional timeout can be passed as the second parameter)
   ln_Status := dbms_pipe.receive_message(lv_PipeName);
   dbms_pipe.unpack_message(lv_PURC_ORDER_ID);
   dbms_pipe.unpack_message(lv_PURC_ORDER_LINE_NO);
   dbms_pipe.unpack_message(lv_RECEIVER_ID);
   dbms_pipe.unpack_message(lv_LINE_NO);
   dbms_pipe.unpack_message(lv_PRINTED);
   dbms_pipe.unpack_message(lv_RECEIVED_QTY);
   dbms_pipe.unpack_message(lv_PO);
   dbms_pipe.unpack_message(lv_VENDOR);
   dbms_pipe.unpack_message(lv_DESCRIPTION);
   dbms_pipe.unpack_message(lv_RECEIVED_DATE);
   INSERT INTO test_audit
   VALUES( lv_PURC_ORDER_ID,
           lv_PURC_ORDER_LINE_NO,
           lv_RECEIVER_ID,
           lv_LINE_NO,
           lv_PRINTED,
           lv_RECEIVED_QTY,
           lv_PO,
           lv_VENDOR,
           lv_DESCRIPTION,
           lv_RECEIVED_DATE);
   COMMIT;
END;
SQL> insert into test_table values('Test table row');
1 row created.
SQL> insert into test_table values('Create audit row');
insert into test_table values('Create audit row')
ERROR at line 1:
ORA-20001: Audit row generated
ORA-06512: at "TEST.TEST_TABLE_TRIGGER", line 19
ORA-04088: error during execution of trigger 'TEST.TEST_TABLE_TRIGGER'
--This bit is done in another session
SQL> exec p_Read_Audit_Pipe;
SQL> select * from test_audit;
Purch order   123 Receiver id    Line no   456   PO Number Vendor   Create audit row    06/09/2004
SQL> select * from test_table;
TEXT
Test table row
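
For completeness: the original trigger loses its BARCODERECEIVER_LINE row because raise_application_error makes the triggering statement fail, and Oracle then rolls back that statement together with any DML the trigger itself performed. On 8i and later, an autonomous transaction is another common work-around: the second insert commits independently of the doomed statement, so it survives. A minimal sketch reusing the test_audit table above (the procedure name is hypothetical; note the audit row stays committed even if the outer transaction later rolls back for an unrelated reason):
CREATE OR REPLACE PROCEDURE p_audit_autonomous(v_text IN VARCHAR2)
IS
   PRAGMA AUTONOMOUS_TRANSACTION;  -- runs in its own transaction
BEGIN
   INSERT INTO test_audit(PURC_ORDER_ID, RECEIVED_DATE)
   VALUES(v_text, SYSDATE);
   COMMIT;  -- an autonomous block must commit or roll back before returning
END;
CREATE OR REPLACE TRIGGER test_table_trigger BEFORE INSERT ON test_table
FOR EACH ROW
BEGIN
   IF :NEW.text = 'Create audit row' THEN
      p_audit_autonomous(:NEW.text);
      -- Rolls back only the test_table insert; the audit row is already committed
      RAISE_APPLICATION_ERROR(-20001,'Audit row generated');
   END IF;
END;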

Similar Messages

  • How to insert into a table with a nested table which refers to another table

    Hello everybody,
    As the title of this thread might not be very clear, I'm going to explain it:
    In the context of a library, I have an object table of Book and an object table of Subscriber.
    In the Subscriber table, I have a nested table modeling the Loans made by the subscriber.
    And finally, this nested table refers to the Book table.
    Here is the code for creating these tables:
    Book:
    create or replace type TBook as object
    (
    number int,
    title varchar2(50)
    );
    /
    Loan:
    create or replace type TLoan as object
    (
    book ref TBook,
    loaning_date date
    );
    /
    create or replace type NTLoan as table of TLoan;
    /
    Subscriber:
    create or replace type TSubscriber as object
    (
    sub_id int,
    name varchar2(25),
    loans NTLoan
    );
    /
    Now, my problem is how to insert into a table of TSubscriber... I tried this query, without any success:
    insert into OSubscriber values
    (1, 'LEVEQUE', NTLoan(
    select TLoan(ref(b), '10/03/85') from OBook b where b.number = 1))
    Of course, there is an occurrence of book in the table OBook with the number attribute 1.
    Oracle returned this error:
    SQL error : ORA-00936: missing expression
    00936. 00000 - "missing expression"
    Thank you for your help

    1) NUMBER is a reserved word - you can't use it as identifier:
    SQL> create or replace type TBook as object
      2  (
      3  number int,
      4  title varchar2(50)
      5  );
      6  /
    Warning: Type created with compilation errors.
    SQL> show err
    Errors for TYPE TBOOK:
    LINE/COL ERROR
    0/0      PL/SQL: Compilation unit analysis terminated
    3/1      PLS-00330: invalid use of type name or subtype name
    2) Subquery must be enclosed in parentheses:
    SQL> create table OSubscriber of TSubscriber
      2  nested table loans store as loans
      3  /
    Table created.
    SQL> create table OBook of TBook
      2  /
    Table created.
    SQL> insert
      2    into OBook
      3    values(
      4           1,
      5           'No Title'
      6          )
      7  /
    1 row created.
    SQL> commit
      2  /
    Commit complete.
    SQL> insert into OSubscriber
      2    values(
      3           1,
      4           'LEVEQUE',
      5           NTLoan(
      6                  (select TLoan(ref(b),DATE '1985-10-03') from OBook b where b.num = 1)
      7                 )
      8          )
      9  /
    1 row created.
    SQL> select  *
      2    from  OSubscriber
      3  /
        SUB_ID NAME
    LOANS(BOOK, LOANING_DATE)
             1 LEVEQUE
    NTLOAN(TLOAN(000022020863025C8D48614D708DB5CD98524013DC88599E34C3D34E9B9DBA1418E49F1EB2, '03-OCT-85'))
    SY.

  • Error when inserting in a table with an identity column

    Hi,
    I am new to Oracle SOA suite and ESB.
    I have been through the Oracle training and have worked for about 2 months with the tooling.
    We have a Database adapter that inserts data into 5 tables with relations to each other.
    Each table has its own NOT NULL identity column.
    When running/testing the ESB service we get the error at the end of this post.
    From this we learned that the Database adapter inserts the value NULL into the identity column.
    We cannot find in the documentation how to get the Database adapter to skip this first column and ignore it.
    Is this possible within the wizard? Our impression is no.
    Is this possible somewhere else?
    And if so, how can we do this?
    If anyone can help, it would be greatly appreciated.
    Pepijn
    Generic error.
    oracle.tip.esb.server.common.exceptions.BusinessEventRejectionException: An unhandled exception has been thrown in the ESB system. The exception reported is: "org.collaxa.thirdparty.apache.wsif.WSIFException: esb:///ESB_Projects/GVB_PDI_PDI_Wegschrijven_Medewerkergegevens/testurv.wsdl [ testurv_ptt::insert(VastAdresCollection) ] - WSIF JCA Execute of operation 'insert' failed due to: DBWriteInteractionSpec Execute Failed Exception.
    insert failed. Descriptor name: [testurv.VastAdres]. [Caused by: Cannot insert explicit value for identity column in table 'VastAdres' when IDENTITY_INSERT is set to OFF.]
    ; nested exception is:
         ORABPEL-11616
    DBWriteInteractionSpec Execute Failed Exception.
    insert failed. Descriptor name: [testurv.VastAdres]. [Caused by: Cannot insert explicit value for identity column in table 'VastAdres' when IDENTITY_INSERT is set to OFF.]
    Caused by Exception [TOPLINK-4002] (Oracle TopLink - 10g Release 3 (10.1.3.3.0) (Build 070608)): oracle.toplink.exceptions.DatabaseException
    Internal exception: com.microsoft.sqlserver.jdbc.SQLServerException: Cannot insert explicit value for identity column in table 'VastAdres' when IDENTITY_INSERT is set to OFF. Error code: 544
    Call:INSERT INTO dbo.VastAdres (ID, BeginDatum, Einddatum, Land, Plaats, Postcode, VolAdres) VALUES (?, ?, ?, ?, ?, ?, ?)
         bind => [null, 1894-06-24 00:00:00.0, 1872-09-04 00:00:00.0, Nederland, Wijdewormer, 1456 NR, Oosterdwarsweg 8]
    Query:InsertObjectQuery(<VastAdres null />).

    Hi,
    Click on the resources tab in the ESB system/ Project to see the ESB system design and all the components in it.
    Click on the Database adapter that you want to edit and make the necessary changes.
    Check this link:
    http://download-uk.oracle.com/docs/cd/B31017_01/core.1013/b28764/esb008.htm for "6.8.2 How to Modify Adapter Services" section.
    If you are calling a database procedure which in turn makes the insert, you will have to make the changes in the database, and your job will be much simpler. It seems there are limitations on what you can change in the Database adapter once it is created. Please check the link for further details.
    Thanks,
    Rajesh

  • Record not inserting into SAP table with connector framework?

    Here is the code, but the record is not being inserted into the table, although the same piece of code works fine when updating the record:
    try {
        interaction = connection.createInteractionEx();
        IInteractionSpec interactionSpec = interaction.getInteractionSpec();
        String functionName = "Z_XYZ";
        interactionSpec.setPropertyValue("Name", functionName);
        String writingTable = "MYTABLE";
        RecordFactory rf = interaction.getRecordFactory();
        MappedRecord importParams = rf.createMappedRecord("input");
        importParams.put("ATTR1", "VALUE1");
        importParams.put("ATTR2", "VALUE2");
        IFunction function = connection.getFunctionsMetaData().getFunction(functionName);
        IStructureFactory sf = interaction.retrieveStructureFactory();
        IRecordSet table = (IRecordSet) sf.getStructure(function.getParameter(writingTable).getStructure());
        table.insertRow();
        table.setString("ATNAME", "VALUE");
        table.setString("ATWRT", "VALUE");
        importParams.put(writingTable, table);
        MappedRecord output = (MappedRecord) interaction.execute(interactionSpec, importParams);
    } catch (Exception e) {
        e.printStackTrace();
    }
    Any idea?
    Thanks
    MMK

    Hi Mohan,
    Does a creation through SE37 with the same input work?
    Yoav.

  • Constantly inserting into large table with unique index... Guidance?

    Hello all;
    So here is my world. Central to our data monitoring system we have an Oracle database running Oracle Standard One licensing (please don't laugh... I understand it is comical).
    This DB is about 1.7 TB of small record data.
    One table in particular (the raw incoming data, 350gb, 8 billion rows, just in the table) is fed millions of rows each day in real time by two to three main "data collectors" or what have you. Data must be available in this table "as fast as possible" once it is received.
    This table has 6 columns (one varchar usually empty, a few numerics including a source id, a timestamp and a create time).
    The data is collect in chronological order (increasing timestamp) 90% of the time (though sometimes the timestamp may be very old and catch up to current). The other 10% of the time the data can be out of order according to the timestamp.
    This table has two indexes, unique (sourceid, timestamp), and a non unique (create time). (FYI, this used to be an IOT until we had to add the second index on create time, at which point a secondary index on create time slowed the IOT to a crawl)
    About 80% of this data is removed after it ages beyond 3 months; 20% is retained as "special" long term data (customer pays for longer raw source retention). The data is removed using delete statements. This table is never (99.99% of the time) updated. The indexes are not rebuilt... ever... as a rebuild is about a 20+ hour process, and without online rebuilds since we are standard one, this is just not possible.
    Now, what we are observing about the inserts into this table:
    - Inserts are much slower based on a "wider" cardinality of the "sourceid" of the data being inserted. What I mean is that 10,000 inserts for 10,000 sourceids (regardless of timestamp) is MUCH, MUCH slower than 10,000 inserts for a single sourceid. This makes sense to me, as I understand that Oracle must inspect more branches of the index for uniqueness, and more different physical blocks will be used to store the new index data. There are about 2 million unique sourceids across our system.
    - Over time, Oracle is requesting more and more RAM to satisfy these inserts in a timely manner. My understanding here is that Oracle is attempting to hold the leaf blocks of these indexes in the buffer cache perpetually. Our system does have a 99% cache hit rate. However, we are seeing Oracle requiring roughly 10GB extra RAM per quarter to 6 months; we're at about 50GB of RAM just for Oracle already.
    - If I emulate our production load on a brand new, empty table / indexes, performance is easily 10x to 20x faster than what I see when I do the same tests with the large production copies of data.
    We have the following assumption: Partitioning this table based on good logical grouping of sourceid, and then timestamp, will help reduce the work required by oracle to verify uniqueness of data, reducing the amount of data that must be cached by oracle, and allow us to handle our "older than 3 month" at a partition level, greatly reducing table and index fragmentation.
    Based on our hardware, it's going to be about a million dollar hit to upgrade to Enterprise (with partitioning), plus a couple hundred thousand a year in support. Currently I think we pay a whopping 5 grand a year in support, if that, in total Oracle costs. This is going to be a huge pill for our company to swallow.
    What I am looking for guidance / help on: should we really expect partitioning to make a difference here? I want to get back that 10x performance difference we see between a fresh empty system and our current production system. I also want to limit Oracle's 10GB-per-quarter growing need for more buffer cache (the cardinality of sourceid does NOT grow by that much per quarter... maybe 1000s per quarter, out of 2 million).
    Also, please, I'd appreciate it if there were no mocking comments about using Standard One up to this point :) I know it is risky and insane and maybe more than a bit silly, but we make do with what we have. And all the credit in the world to Oracle that their "entry" level system has been able to handle everything we've thrown at it so far! :)
    Alright all, thank you very much for listening, and I look forward to hear the opinions of the experts.

    Hello,
    Here is a link to a blog article that will give you the right questions and answers which apply to your case:
    http://jonathanlewis.wordpress.com/?s=delete+90%25
    Since you are deleting 80% of your data (old data) based on a timestamp, don't think at all about using the direct path insert /*+ append */ suggested by one of the contributors to this thread. A direct path load will not re-use any free space made by the deletes. You have two indexes:
    (a) unique index (sourceid, timestamp)
    (b) index(create time)
    Your delete logic (based on arrival time) will smash your indexes, since you are always deleting the left hand side of the index; it means you will end up with what we call a right-hand index - in other words, the scattering of the index keys per leaf block is certainly catastrophic (there is an Oracle internal function named sys_op_lbid that will allow you to verify this index information). There is a fair chance that your two indexes will benefit from a coalesce, as already suggested:
               ALTER INDEX indexname COALESCE;
    This coalesce should be investigated to be done on a regular basis (maybe after each 80% delete). You seem to have several sourceid for one timestamp. If so, you should think about compressing this index:
        create index indexname on tablename (sourceid, timestamp) compress;
    or
        alter index indexname rebuild compress;
    You will do it only once. Your index will have a smaller size and may be more efficient than it is now. The index compression will add extra CPU work during an insert, but it might help improve the overall insert process.
    Best Regards
    Mohamed Houri

  • Understanding logminer results -- inserting row into table with CLOB field

    In using LogMiner I have noticed that inserts into rows that contain a CLOB field (I assume this applies to other LOB type fields as well; I have only tested with CLOB so far) are actually recorded as two DML entries.
    --the first entry is the insert operation that inserts all values, with an EMPTY_CLOB() for the CLOB field
    --the second entry is the update that sets the actual CLOB value (this is true even if the value of the CLOB field is not being set explicitly)
    This separation makes sense, as there may be separate locations where the values are being stored, etc.
    However, what I am tripping over is the fact that the first entry, the insert, has a RowId value of 'AAAAAAAAAAAAAAAAAA', which is invalid if I attempt to use it in a flashback query such as:
    SELECT * FROM PERSON AS OF SCN ##### WHERE RowId = 'AAAAAAAAAAAAAAAAAA'
    The second operation, the update of the CLOB field, has the valid RowId.
    Now, again, this makes sense if the insert of the new row is not really considered "done" until the two steps are done. However, is there some way to group these operations together when analyzing the log contents, to know that these two operations are a "matched set"?
    Not a total deal breaker, but it would be nice to know what is happening under the hood here so I don't act on any false assumptions.
    Thanks for any input.
    To replicate:
    Create a table with a CLOB field:
    CREATE TABLE DEVUSER.TESTTABLE
    (       ID NUMBER
           , FULLNAME VARCHAR2(50)
           , AGE NUMBER
           , DESCRIPTION CLOB
    );
    Capture the before SCN:
    SELECT DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER FROM DUAL;
    Insert a new row in the test table:
    INSERT INTO TESTTABLE(ID,FULLNAME,AGE) VALUES(1,'Robert BUILDER',35);
    COMMIT;
    Capture the after SCN:
    SELECT DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER FROM DUAL;
    Start a LogMiner session with the bracketing SCN values and options:
    EXECUTE DBMS_LOGMNR.START_LOGMNR(STARTSCN=>2619174, ENDSCN=>2619191, -
               OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG + DBMS_LOGMNR.CONTINUOUS_MINE + -
               DBMS_LOGMNR.COMMITTED_DATA_ONLY + DBMS_LOGMNR.NO_ROWID_IN_STMT + DBMS_LOGMNR.NO_SQL_DELIMITER)
    Query the logs for the changes in that range:
    SELECT commit_scn, xid, operation, table_name, row_id
         , sql_redo, sql_undo, rs_id, ssn
    FROM V$LOGMNR_CONTENTS
    ORDER BY xid asc, sequence# asc;
    Results:
    2619178     0C00070028000000     START                  AAAAAAAAAAAAAAAAAA     set transaction read write
    2619178     0C00070028000000     INSERT     TESTTABLE     AAAAAAAAAAAAAAAAAA     insert into "DEVUSER"."TESTTABLE" ...
    2619178     0C00070028000000     UPDATE     TESTTABLE     AAAFEXAABAAALEJAAB     update "DEVUSER"."TESTTABLE" set "DESCRIPTION" = NULL ...
    2619178     0C00070028000000     COMMIT                  AAAAAAAAAAAAAAAAAA     commit
    Edited by: 958701 on Sep 12, 2012 9:05 AM
    Edited by: 958701 on Sep 12, 2012 9:07 AM
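
    One hedged way to pair the two entries: all operations of a single transaction share the same XID, so the valid ROWID from the CLOB update can be copied onto the insert entry with an analytic function over the query shown above. This naive version assumes one inserted row per transaction, as in the repro:
        SELECT xid, sequence#, operation, table_name, row_id,
               -- carry the UPDATE's valid ROWID onto every entry of the transaction
               MAX(CASE WHEN operation = 'UPDATE' THEN row_id END)
                   OVER (PARTITION BY xid) AS resolved_row_id
        FROM v$logmnr_contents
        WHERE table_name = 'TESTTABLE'
        ORDER BY xid, sequence#;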

    Scott,
    Thanks for the reply.
    I am inserting into the table over a database link.
    I am using the new version of HTML Db (2.0)
    HTML Db is connected to an Oracle 10 database I think, however the table I am trying to insert data into (via the database link) is in an Oracle 8 database - this is why we created a link to it as we couldn't have the HTML Db interacting with the Oracle 8 database directly due to compatibility problems (or so I've been told)
    Simon

  • Large insert op into table with indexes

    Hi,
    Oracle 8.1.7.0. Empty table (after truncate) with two indexes. I need to insert about 40 billion records. What is the better way to complete this task:
    1. Drop indexes, insert data then build indexes.
    2. Simply insert data into table.
    Thanks.

    The only way to find out is to test... For example, I did a test on my single-CPU box with Oracle 9i. My test was to load all the rows from DBA_SOURCE (only 650k rows). I found that a single insert statement with bitmap indexes online ran faster than the total elapsed time for taking the indexes offline, inserting, and bringing the indexes back up...
    With 40 billion rows, I presume you're using partitioned tables and enabling parallel DML. Thus, your test will be much different from mine...
    In past ETL projects I worked on, I found little difference in timing. I decided that I didn't want to drop indexes (it was version 8i), so I loaded the empty tables with indexes (and constraints) enabled...
    Stan
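
    If you do benchmark option 1, on that vintage of Oracle the usual pattern is to mark the indexes UNUSABLE rather than drop them. A sketch with hypothetical object names; note this only helps for non-unique indexes, since unique indexes cannot be skipped this way:
        ALTER INDEX big_tab_idx1 UNUSABLE;
        ALTER INDEX big_tab_idx2 UNUSABLE;
        ALTER SESSION SET skip_unusable_indexes = TRUE;
        -- Direct-path load with no index maintenance
        INSERT /*+ APPEND */ INTO big_tab SELECT * FROM staging_tab;
        COMMIT;
        ALTER INDEX big_tab_idx1 REBUILD;
        ALTER INDEX big_tab_idx2 REBUILD;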

  • Insert into two tables with a single query (same ID)

    Hello,
    I want to insert into two tables at the same time (with a single query), provided that both records get inserted with the same id. How do I do this?
    Table Movies
    id
    name
    Table Category
    movie_id
    cat_type
    a) Insert into the first table, retrieve the id (maybe by using my_sequence.currval), and then insert into the other table.
    Issue: this makes three queries to the db. I am also guessing that there will be an issue when multiple people try to insert; I might be wrong.
    I don't have any other idea.
    Greatly appreciated!

    Why not use a multitable insert? It's available from 9i.
    A sequence.nextval will return the same value within the whole statement,
    so all records can be inserted with the same id.
    Look at this example:
    DROP TABLE A;
    DROP TABLE B;
    drop sequence a_seq;
    CREATE TABLE A(
      ID NUMBER,
      FIRSTNAME VARCHAR2(50)
    );
    CREATE TABLE B AS
    SELECT id, firstname lastname FROM a;
    CREATE SEQUENCE a_seq
    START WITH 1;
    INSERT ALL
    INTO A(ID, FIRSTNAME) VALUES(A_SEQ.NEXTVAL, FNAME)
    INTO B(ID, LASTNAME) VALUES(A_SEQ.NEXTVAL, LNAME)
    SELECT 'fname ' || LEVEL FNAME, 'lname ' || LEVEL LNAME
    FROM DUAL
    CONNECT BY LEVEL < 10;
    COMMIT;
    SELECT * FROM A;
    SELECT * FROM b;
    DROP TABLE A succeeded.
    DROP TABLE B succeeded.
    drop sequence a_seq succeeded.
    CREATE TABLE succeeded.
    CREATE TABLE succeeded.
    CREATE SEQUENCE succeeded.
    18 rows inserted
    commited
    ID                     FIRSTNAME                                         
    3                      fname 1                                           
    4                      fname 2                                           
    5                      fname 3                                           
    6                      fname 4                                           
    7                      fname 5                                           
    8                      fname 6                                           
    9                      fname 7                                           
    10                     fname 8                                           
    11                     fname 9                                           
    9 rows selected
    ID                     LASTNAME                                          
    3                      lname 1                                           
    4                      lname 2                                           
    5                      lname 3                                           
    6                      lname 4                                           
    7                      lname 5                                           
    8                      lname 6                                           
    9                      lname 7                                           
    10                     lname 8                                           
    11                     lname 9                                           
    9 rows selected

  • Insert into two tables with trigger on PK in second table

    Hi everyone, I need help.
    I have two tables (organizations, addresses).
    On the addresses table I have a trigger on the PK. When I do an insert I must get this param and insert it into the organizations table for reference.
    Without ADF I can do an insert with RETURNING on addresses, then do an insert on organizations with this returned param. How can I do this in ADF business logic using a train task flow?
    Thanks all.
    Edited by: WaterStream on 15.10.2012 15:10

    thanks for the reply, but I found a solution in these materials:
    http://liuwuhua.blogspot.com/2010/11/master-detail-crud-in-adf-bc.html
    (on this link anyone can download model and see all params)
    but in my project i will get JBO-25030 error.
    Solution founded here:
    http://vtkrishn.com/2011/02/09/oracle-jbo-invalidownerexception/
    Works great!!!

  • How to optimize massive insert on a table with spatial index ?

    Hello,
    I need to implement a load process for saving up to 20,000 points per minute in Oracle 10g R2.
    These points represent car locations tracked by GPS, and I need to store at least all positions from the past 12 hours.
    My problem is that the spatial index is very costly during insert (for the moment I only do insertion).
    My several attempts at insertion, via:
    - Java and PreparedStatement.executeBatch
    - Java and generating a SQL*Loader file
    - Java and insertion on a view with an "instead of" trigger
    give me the same results... (not so good)
    For the moment, I work with DROP INDEX, INSERT, CREATE INDEX phases.
    But is there a way to only DISABLE the index and then REBUILD it only for the inserted rows?
    I used the APPEND hint for insertion:
    INSERT /*+ APPEND */ INTO MY_TABLE (ID, LOCATION) VALUES (?, MDSYS.SDO_GEOMETRY(2001,NULL,MDSYS.SDO_POINT_TYPE(?, ?, NULL), NULL, NULL))
    My spatial index is created with the following options:
    'sdo_indx_dims=2,layer_gtype=point'
    Is there a way to optimize this heavy load?
    What about the PARALLEL option, and how does it work? (Not so clear to me from the documentation... I am not a DBA.)
    Thanks in advance

    It is possible to insert + commit 20000 points in 16 seconds.
    select * from v$version;
    BANNER                                                                         
    Oracle Database 10g Enterprise Edition Release 10.2.0.1.0 - Prod               
    PL/SQL Release 10.2.0.1.0 - Production                                         
    CORE     10.2.0.1.0     Production                                                     
    TNS for 32-bit Windows: Version 10.2.0.1.0 - Production                        
    NLSRTL Version 10.2.0.1.0 - Production                                         
    drop table testpoints;
    create table testpoints
    ( point mdsys.sdo_geometry);
    delete user_sdo_geom_metadata
    where table_name = 'TESTPOINTS'
    and   column_name = 'POINT';
    insert into user_sdo_geom_metadata values
    ('TESTPOINTS'
    ,'POINT'
    ,sdo_dim_array(sdo_dim_element('X',0,1000,0.01),sdo_dim_element('Y',0,1000,0.01))
    ,null);
    create index testpoints_i on testpoints (point)
    indextype is mdsys.spatial_index parameters ('sdo_indx_dims=2,layer_gtype=point');
    insert /*+ append */ into testpoints
    select (sdo_geometry(2001,null,sdo_point_type(1+ rownum / 20, 1 + rownum / 50, null),null,null))
    from all_objects where rownum < 20001;
    Duration: 00:00:10.68 seconds
    commit;
    Duration: 00:00:04.96 seconds
    select count(*) from testpoints;
      COUNT(*)
         20000
    The insert of 20,000 rows takes 11 seconds; the commit takes 5 seconds.
    In this example there is no data traffic between the Oracle database and a client, but you have 60 - 16 = 44 seconds left to upload your points into a temporary table. After uploading into a temporary table you can do:
    insert /*+ append */ into testpoints
    select (sdo_geometry(2001,null,sdo_point_type(x,y, null),null,null))
    from temp_table;
    commit;
    Your INSERT ... VALUES approach is slow; do some bulk processing.
    I think it can be done, my XP computer that runs my database isn't state of the art.

  • Taking more time while inserting into a table (with foreign key)

    Hi All,
    I am facing a problem while inserting values into the master table.
    The problem:
    Table A -- User Master Table (Reg No, Name, etc)
    Table B -- Transaction Table (foreign key reference to Table A).
    While inserting data into Table B, I also need to insert the reg no into Table B, which is mandatory. I followed the logic mentioned in the SRDemo.
    While inserting, we need to query Table A first to have the values in TableABean.java:
    final TableA tableA = (TableA) uow.executeQuery("findUser", TableA.class, regNo);
    Then we need to create the instance for TableB:
    TableB tableB = (TableB) uow.newInstance(TableB.class);
    tableB.setID(bean.getID);
    tableA.addTableB(tableB); -- this is to insert the regNo of TableA into TableB. This line executes the query "select * from TableB where RegNo = <tableA.getRegNo>".
    This query takes too much time if there are many rows in TableB for that particular registration no. Because of this, it takes more time to insert into TableB.
    For example: TableA regNo 101, having fewer entries in TableB, means inserting a record takes less than 1 sec;
    regNo 102, having more entries in TableB, means inserting a record takes more than 2 sec.
    The time delay differs between users when they enter transactions in TableB.
    I need to avoid this, since in future it will take more time... from 2 sec to 10 sec if the volume of data increases.
    Please help me to resolve this issue... I am facing it now in production.
    Thanks & Regards
    VB

    Hello,
    Looks like you have a 1:M relationship from TableA to TableB, with a 1:1 back pointer from TableB to TableA. If triggering the 1:M relationship is causing the delays you want to avoid, there are two quick ways I can see:
    1) Don't map it. Leave the TableA->TableB 1:M unmapped, and instead just query for the relationship when you do need it. This means you do not need to call tableA.addTableB(tableB), and instead only need to call tableB.setTableA(tableA), so that the TableB->TableA relation gets set. Might not be the best option, but it depends on your application's usage. It does allow you to potentially page the TableB results or add other query performance options when you do need the data though.
    2) You are currently using lazy loading for the TableA->TableB relationship - if it is untriggered, don't bother calling tableA.addTableB(tableB); only call tableB.setTableA(tableA). This of course requires using the TopLink API to a) verify the collection is an IndirectCollection type, and b) verify that it hasn't been triggered. If it has been triggered, you will still need to call tableA.addTableB(tableB), but it won't result in a query. Check out the oracle.toplink.indirection.IndirectContainer class and its isInstantiated() method. This can cause problems in highly concurrent environments though, as other threads may have triggered the indirection before you commit your transaction, so the A->B collection may not be up to date - this might require refreshing the TableA if so.
    Change tracking would probably be the best option to use here, and is described in the EclipseLink wiki:
    http://wiki.eclipse.org/Introduction_to_EclipseLink_Transactions_(ELUG)#Attribute_Change_Tracking_Policy
    Best Regards,
    Chris

  • MS SQL Server 2014: Error inserting into Temp table with index and identity field

    In this thread, I mentioned a problem with SQL Server 2014:
    SQL Server 2014: Bug with IDENTITY INSERT ON
    The question was answered: it is a bug. To keep you informed on this issue, I am opening this discussion.
    Problem:
    The code below works perfectly fine on MS SQL Server 2008 R2 and MS SQL Server 2012, but gives an error every second time the proc is executed on MS SQL Server 2014. If I do not define any index on the temp table, the problem disappears. Defining the index after the insert does not help.
    SET NOCOUNT ON
    GO
    IF EXISTS (SELECT 1 FROM sys.procedures WHERE name = 'usp_Test') DROP PROC dbo.usp_Test;
    GO
    CREATE PROC dbo.usp_Test AS
    BEGIN
    SET NOCOUNT ON
    CREATE TABLE #Source(ID integer NOT NULL);
    INSERT INTO #Source VALUES (1), (2), (3);
    CREATE TABLE #Dest (ID integer IDENTITY(1,1) NOT NULL);
    CREATE INDEX #IDX_Dest ON #Dest (ID);
    PRINT 'Check if the insert might cause an identity crisis';
    SELECT 'Source' AS SourceTable, * FROM #Source;
    SELECT 'Destination' AS DestTable, * FROM #Dest;
    SET IDENTITY_INSERT #Dest ON;
    PRINT 'Do the insert';
    INSERT INTO #Dest (ID) SELECT ID FROM #Source;
    PRINT 'Insert ready';
    SET IDENTITY_INSERT #Dest OFF;
    SELECT * FROM #Dest;
    DROP TABLE #Source;
    DROP TABLE #Dest;
    END;
    GO
    PRINT 'First execution of the proc, everything OK';
    EXEC dbo.usp_Test;
    PRINT '';
    PRINT 'Second execution of the proc, the insert fails.';
    PRINT 'Removing the index #IDX_Dest causes the error to disappear.';
    EXEC dbo.usp_Test;
    GO
    DROP PROC dbo.usp_Test;
    GO
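
    An untested workaround sketch while waiting for a fix: the failure needs the combination of an index plus IDENTITY_INSERT on the temp table, so copying into a work table whose ID is a plain (non-IDENTITY) column never takes the crashing code path. A hypothetical variant of the proc body:
        CREATE TABLE #Dest (ID integer NOT NULL);  -- plain column, no IDENTITY
        CREATE INDEX #IDX_Dest ON #Dest (ID);
        -- No SET IDENTITY_INSERT needed, so the crashing code path is never reached
        INSERT INTO #Dest (ID) SELECT ID FROM #Source;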

    There is some progress. Communication from a former Microsoft employee tells us this:
    Shivendra Vishal
    Engineer at Microsoft
    I am no longer with MS, and I do not have code access; however, from the public symbols I could make out the following:
    sqlmin!SetidentI2I4+0x1f3:
    000007fe`f4d865d3 488b10 mov rdx,qword ptr [rax] ds:00000000`00000000=????????????????
    ExceptionAddress: 000007fef4d865d3 (sqlmin!SetidentI2I4+0x00000000000001f3)
    ExceptionCode: c0000005 (Access violation)
    ExceptionFlags: 00000000
    NumberParameters: 2
    Parameter[0]: 0000000000000000
    Parameter[1]: 0000000000000000
    Attempt to read from address 0000000000000000
    This is a read AV and from registers it is clear that we were trying to move the value of location pointed by qword of register rax which is not valid:
    rax=0000000000000000 rbx=0000000000000038 rcx=0000000000001030
    rdx=0000000000000006 rsi=00000001f55def98 rdi=00000000106fd070
    rip=000007fef4d865d3 rsp=00000000106fcf40 rbp=00000000106fcfe9
    r8=0000000000000000 r9=00000001f55def60 r10=00000001f55defa0
    r11=00000000106fcd20 r12=0000000000000000 r13=0000000000000002
    r14=00000001f49c3860 r15=00000001f58c0040
    iopl=0 nv up ei pl nz na po nc
    cs=0033 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00010206
    The stack is:
    # Child-SP RetAddr Call Site
    00 00000000`106fcf40 000007fe`f30c1437 sqlmin!SetidentI2I4+0x1f3
    01 00000000`106fd050 000007fe`f474e7ce sqlTsEs!CEsExec::GeneralEval4+0xe7
    02 00000000`106fd120 000007fe`f470e6ef sqlmin!CQScanUpdateNew::GetRow+0x43d
    03 00000000`106fd1d0 000007fe`f08ff517 sqlmin!CQueryScan::GetRow+0x81
    04 00000000`106fd200 000007fe`f091cebe sqllang!CXStmtQuery::ErsqExecuteQuery+0x36d
    05 00000000`106fd390 000007fe`f091ccb9 sqllang!CXStmtDML::XretDMLExecute+0x2ee
    06 00000000`106fd480 000007fe`f08fa058 sqllang!CXStmtDML::XretExecute+0xad
    07 00000000`106fd4b0 000007fe`f08fb66b sqllang!CMsqlExecContext::ExecuteStmts<1,1>+0x427
    08 00000000`106fd5f0 000007fe`f08fac2e sqllang!CMsqlExecContext::FExecute+0xa33
    09 00000000`106fd7e0 000007fe`f152cfaa sqllang!CSQLSource::Execute+0x86c
    0a 00000000`106fd9b0 000007fe`f152c9e8 sqllang!CStmtExecProc::XretLocalExec+0x25a
    0b 00000000`106fda30 000007fe`f152a1d8 sqllang!CStmtExecProc::XretExecExecute+0x4e8
    0c 00000000`106fe1e0 000007fe`f08fa058 sqllang!CXStmtExecProc::XretExecute+0x38
    0d 00000000`106fe220 000007fe`f08fb66b sqllang!CMsqlExecContext::ExecuteStmts<1,1>+0x427
    0e 00000000`106fe360 000007fe`f08fac2e sqllang!CMsqlExecContext::FExecute+0xa33
    0f 00000000`106fe550 000007fe`f0902267 sqllang!CSQLSource::Execute+0x86c
    10 00000000`106fe720 000007fe`f0909087 sqllang!process_request+0xa57
    11 00000000`106feee0 000007fe`f2bf49d0 sqllang!process_commands+0x4a3
    12 00000000`106ff200 000007fe`f2bf47b4 sqldk!SOS_Task::Param::Execute+0x21e
    13 00000000`106ff800 000007fe`f2bf45b6 sqldk!SOS_Scheduler::RunTask+0xa8
    14 00000000`106ff870 000007fe`f2c136ff sqldk!SOS_Scheduler::ProcessTasks+0x279
    15 00000000`106ff8f0 000007fe`f2c138f0 sqldk!SchedulerManager::WorkerEntryPoint+0x24c
    16 00000000`106ff990 000007fe`f2c13246 sqldk!SystemThread::RunWorker+0x8f
    17 00000000`106ff9c0 000007fe`f2c13558 sqldk!SystemThreadDispatcher::ProcessWorker+0x3ab
    18 00000000`106ffa70 00000000`775d59ed sqldk!SchedulerManager::ThreadEntryPoint+0x226
    19 00000000`106ffb10 00000000`7780c541 kernel32!BaseThreadInitThunk+0xd
    1a 00000000`106ffb40 00000000`00000000 ntdll!RtlUserThreadStart+0x21
    Unassembling the function:
    000007fe`f4d8658e 4c8b10 mov r10,qword ptr [rax]
    000007fe`f4d86591 4533e4 xor r12d,r12d
    000007fe`f4d86594 410fb7d5 movzx edx,r13w
    000007fe`f4d86598 4533c9 xor r9d,r9d
    000007fe`f4d8659b 4533c0 xor r8d,r8d
    000007fe`f4d8659e 488bc8 mov rcx,rax
    000007fe`f4d865a1 4489642420 mov dword ptr [rsp+20h],r12d
    000007fe`f4d865a6 41ff5230 call qword ptr [r10+30h]
    000007fe`f4d865aa 8b5597 mov edx,dword ptr [rbp-69h]
    000007fe`f4d865ad 4c8b10 mov r10,qword ptr [rax]
    000007fe`f4d865b0 4489642438 mov dword ptr [rsp+38h],r12d
    000007fe`f4d865b5 4489642430 mov dword ptr [rsp+30h],r12d
    000007fe`f4d865ba 458d442401 lea r8d,[r12+1]
    000007fe`f4d865bf 4533c9 xor r9d,r9d
    000007fe`f4d865c2 488bc8 mov rcx,rax
    000007fe`f4d865c5 c644242801 mov byte ptr [rsp+28h],1
    000007fe`f4d865ca 4488642420 mov byte ptr [rsp+20h],r12b
    000007fe`f4d865cf 41ff5250 call qword ptr [r10+50h]
    000007fe`f4d865d3 488b10 mov rdx,qword ptr [rax] <=================== AV happened over here
    000007fe`f4d865d6 488bc8 mov rcx,rax
    000007fe`f4d865d9 4c8bf0 mov r14,rax
    000007fe`f4d865dc ff5268 call qword ptr [rdx+68h]
    000007fe`f4d865df 488d55e7 lea rdx,[rbp-19h]
    000007fe`f4d865e3 4c8b00 mov r8,qword ptr [rax]
    000007fe`f4d865e6 488bc8 mov rcx,rax
    000007fe`f4d865e9 41ff5010 call qword ptr [r8+10h]
    000007fe`f4d865ed f6450a04 test byte ptr [rbp+0Ah],4
    I remember a few issues with the scan2ident function; I am not sure if they have fixed it. However, it appears this was introduced in SQL 2014, and we need help from MS to get it resolved, as it needs code analysis. It does not reproduce on versions of SQL other than SQL 2014.
    Also, to add, interestingly the value of rax is not visibly changed, and it was successfully passed on to rcx, which has a valid value, so something should have changed the value of rax inside the call via call qword ptr [r10+50h]. Looking at this, it appears that it might be a list of functions and we are going to a particular offset [50h]. So, bottom line: the call to qword ptr [r10+50h] should be changing something in rax, and debugging/analyzing this code might give us some more ideas.

  • Weird problem w. mysql 4.0 when inserting into a table with auto_increment

    Since I upgraded my mysql database from 3.23 to 4.0.1, the following code does not work anymore.
    I get this error msg:
    "Invalid argument value: Duplicate entry '2147483647' for key 1"
    package mysql4test;

    import java.sql.*;

    class test {
        public static void main(String[] args) {
            Connection connection = null;
            Statement st = null;
            try {
                Class.forName("org.gjt.mm.mysql.Driver").newInstance();
                connection = DriverManager.getConnection(
                        "jdbc:mysql://192.168.0.4/x?user=x&password=");
                st = connection.createStatement();
                for (int i = 1; i < 10; i++) {
                    String insert = "insert into x (b) values('hello');";
                    System.out.println(insert);
                    st.executeUpdate(insert);
                }
            } catch (Exception ex) {
                System.err.println(ex.getMessage());
            }
        }
    }
    The table definition of table x is the following:
    create table x(a int(11) primary key auto_increment, b varchar(10));
    What makes the thing even more mysterious is that doing the same thing manually in a mysql client does not produce any error message:
    insert into x (b) values('hello'); works fine in the mysql client delivered with the server.

    Hi eggsurplus!
    Yes, I succeeded in solving the problem in different ways; changing the table was one of them. But the problem is that I can't simply change all the tables (there are a lot), as they are used in other programs.
    The simplest solution that I figured out so far was changing the insert from
    insert into x (b) values('hello')
    to
    insert into x (a,b) values("+i+",'hello')"
    But this solution is still not satisfactory, as in more complex programs you can't just use the i variable of the for loop; you have to add a new variable that increments on every insert.
    This still means changing a lot of code that I wrote for mysql 3.x.
    Besides, I also tried another jdbc driver and it still didn't work.
    The same bug was reported in a PHP forum, but without a solution.
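
    For what it's worth, 2147483647 is the maximum signed INT value, so "Duplicate entry '2147483647' for key 1" smells like an exhausted or corrupted auto_increment counter rather than a driver problem. A hedged thing to check (the reset value below is hypothetical; pick one above the real maximum):
        -- See how far the real data goes
        SELECT MAX(a) FROM x;
        -- Remove a stray row at the INT ceiling, if one exists
        DELETE FROM x WHERE a = 2147483647;
        -- Reset the counter just above the real maximum
        ALTER TABLE x AUTO_INCREMENT = 1001;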

  • [Insert into table with the values of a function]

    Hi,
    I wrote the following procedure in my DB to get the values of bonuses and ids from the table employee_290512:
    create or replace procedure ins_desc is
    cursor i1 is
    select id, decode (description,'Tester',salary*15,'Programmer',salary*30,'Manager',salary*30) bonus from employee_290512;
    v_id employee_290512.id%type;
    v_bonus employee_290512.salary%type;
    Begin
    open i1;
    loop
    fetch i1 into v_id,v_bonus;
    exit when i1%notfound;
    dbms_output.put_line (v_id || ' '||v_bonus);
    end loop;
    close i1;
    end;
    I wrote the following function, which calls the above procedure, to achieve the result:
    create or replace function bon_chk return number
    is
    v_result number;
    begin
    ins_desc;
    return v_result;
    end;
    I wrote the above two pieces to insert the output values of the function into the table called bonuses_proc, which has the columns (id, bonus).
    To achieve this I executed the following query:
    insert into bonuses_proc (employee_id,bonus) select bon_chk from dual;
    This throws the error ORA-00947: not enough values.
    I understand the function returns v_result as one value, and so it throws the above error.
    Could anyone please clarify this for me?
    Regards
    Thelak
    Edited by: thelakbe on Jun 5, 2012 12:00 PM
    Edited by: thelakbe on Jun 5, 2012 12:01 PM
    Edited by: thelakbe on Jun 5, 2012 12:01 PM

    Hi,
    You asked: "Yes, we can do it like the above too, but I don't want to give insert permission on the table to every user, so is there any possibility to do it with the output of the function?"
    If you would use the output of the function (if it had two values) you would still need to do the insert as you tried in the initial post:
    insert into bonuses_proc (employee_id,bonus) select bon_chk from dual;
    If you do not want insert permissions on a table for a user, then you can put the whole SQL in a procedure in a schema with insert permissions and give the user execute rights on that procedure.
    The procedure can be like:
    create or replace
    procedure insert_bonus authid definer as
    begin
    insert into bonuses_proc (employee_id,bonus)
    SELECT
      id
      ,DECODE (description, 'Tester', salary*15, 'Programmer', salary*30, 'Manager', salary*30) bonus
    FROM
      employee_290512;
    end;
    regards,
    Peter

  • Insert into temp table with sorting does not work

    Hi,
    I am inserting some values into a temp table. Before the insert, I sort a column in descending order and then insert. But the rows actually end up in ascending order; I don't know why.
    Please find the code:
    Create table #TempTable( column1 smalldateTime )
    Insert into #TempTable
    Select distinct(column1) from table1 where cloumn2 = 1 order by column1 desc
    When I query the table
    select * from #TempTable
    it shows the dates in ascending order, when they should be descending.
    But when I query Select distinct(column1) from table1 where cloumn2 = 1 order by column1 desc
    the dates are in descending order, which means recent dates fill the top.
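
    Rows in a table have no inherent order; the ORDER BY on the INSERT ... SELECT only controls the order in which rows are handed to the insert, not the order they come back. The reliable fix is to sort when reading:
        SELECT column1
        FROM #TempTable
        ORDER BY column1 DESC;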

    Or use a CTE = Common Table Expression:
    CREATE TABLE #test (id int);
    INSERT INTO #test
    SELECT object_id
    FROM sys.objects;
    SELECT COUNT(*)
    FROM #test;
    GO
    ;WITH cte AS
    (SELECT Top 2 *
    FROM #test
    ORDER BY id desc)
    DELETE FROM cte;
    GO
    SELECT COUNT(*)
    FROM #test;
    GO
    DROP TABLE #test;
    Olaf Helper
