BULK COLLECT with LIMIT 1000 is processing only 1000 of 35000 records

In the code below I have to loop through around 35000 records for every month of the year, from Aug-2010 to Aug-2011.
I am using BULK COLLECT with a LIMIT clause, but there are two problems:
a: The LIMIT clause is returning only 1000 records.
b: It is taking too much time to process.
CREATE OR REPLACE PACKAGE BODY UDBFINV AS
F UTL_FILE.FILE_TYPE;
PV_SEQ_NO NUMBER(7);
PV_REC_CNT NUMBER(7) := 0;
PV_CRLF VARCHAR2(2) := CHR(13) || CHR(10);
TYPE REC_PART IS RECORD(
PART_NUM PM_PART_HARSH.PART_NUM%TYPE,
ON_HAND_QTY PM_PART_HARSH.ON_HAND_QTY%TYPE,
ENGG_PREFIX PM_PART_HARSH.ENGG_PREFIX%TYPE,
ENGG_BASE PM_PART_HARSH.ENGG_BASE%TYPE,
ENGG_SUFFIX PM_PART_HARSH.ENGG_SUFFIX%TYPE);
TYPE TB_PART IS TABLE OF REC_PART;
TYPE REC_DATE IS RECORD(
START_DATE DATE,
END_DATE DATE);
TYPE TB_MONTH IS TABLE OF REC_DATE;
PROCEDURE MAIN IS
/* To be called in Scheduler Programs Action */
BEGIN
/* Initializing package global variables;*/
IFMAINT.V_PROG_NAME := 'FULL_INVENTORY';
IFMAINT.V_ERR_LOG_TAB := 'UDB_ERR_FINV';
IFMAINT.V_HIST_TAB := 'UDB_HT_FINV';
IFMAINT.V_UTL_DIR_NAME := 'UDB_SEND';
IFMAINT.V_PROG_TYPE := 'S';
IFMAINT.V_IF_TYPE := 'U';
IFMAINT.V_REC_CNT := 0;
IFMAINT.V_DEL_INS := 'Y';
IFMAINT.V_KEY_INFO := NULL;
IFMAINT.V_MSG := NULL;
IFMAINT.V_ORA_MSG := NULL;
IFSMAINT.V_FILE_NUM := IFSMAINT.V_FILE_NUM + 1;
IFMAINT.LOG_ERROR; /*Initialize error log table, delete prev. rows*/
/*End of initialization section*/
IFMAINT.SET_INITIAL_PARAM;
IFMAINT.SET_PROGRAM_PARAM;
IFMAINT.SET_UTL_DIR_PATH;
IFMAINT.GET_DEALER_PARAMETERS;
PV_SEQ_NO := IFSMAINT.GENERATE_FILE_NAME;
IF NOT CHECK_FILE_EXISTS THEN
WRITE_FILE;
END IF;
IF IFMAINT.V_BACKUP_PATH_SEND IS NOT NULL THEN
IFMAINT.COPY_FILE(IFMAINT.V_UTL_DIR_PATH,
IFMAINT.V_FILE_NAME,
IFMAINT.V_BACKUP_PATH_SEND);
END IF;
IFMAINT.MOVE_FILE(IFMAINT.V_UTL_DIR_PATH,
IFMAINT.V_FILE_NAME,
IFMAINT.V_FILE_DEST_PATH);
COMMIT;
EXCEPTION
WHEN IFMAINT.E_TERMINATE THEN
IFMAINT.V_DEL_INS := 'N';
IFMAINT.LOG_ERROR;
ROLLBACK;
UTL_FILE.FCLOSE(F);
IFMAINT.DELETE_FILE(IFMAINT.V_UTL_DIR_PATH, IFMAINT.V_FILE_NAME);
RAISE_APPLICATION_ERROR(IFMAINT.V_USER_ERRCODE, IFMAINT.V_ORA_MSG);
WHEN OTHERS THEN
IFMAINT.V_DEL_INS := 'N';
IFMAINT.V_MSG := 'ERROR IN MAIN PROCEDURE ' || IFMAINT.V_PROG_NAME;
IFMAINT.V_ORA_MSG := SUBSTR(SQLERRM, 1, 255);
IFMAINT.V_USER_ERRCODE := -20101;
IFMAINT.LOG_ERROR;
ROLLBACK;
UTL_FILE.FCLOSE(F);
IFMAINT.DELETE_FILE(IFMAINT.V_UTL_DIR_PATH, IFMAINT.V_FILE_NAME);
RAISE_APPLICATION_ERROR(IFMAINT.V_USER_ERRCODE, IFMAINT.V_ORA_MSG);
END;
PROCEDURE WRITE_FILE IS
CURSOR CR_PART IS
SELECT A.PART_NUM, ON_HAND_QTY, ENGG_PREFIX, ENGG_BASE, ENGG_SUFFIX
FROM PM_PART_HARSH A;
lv_cursor TB_PART;
LV_CURR_MONTH NUMBER;
LV_MONTH_1 NUMBER := NULL;
LV_MONTH_2 NUMBER := NULL;
LV_MONTH_3 NUMBER := NULL;
LV_MONTH_4 NUMBER := NULL;
LV_MONTH_5 NUMBER := NULL;
LV_MONTH_6 NUMBER := NULL;
LV_MONTH_7 NUMBER := NULL;
LV_MONTH_8 NUMBER := NULL;
LV_MONTH_9 NUMBER := NULL;
LV_MONTH_10 NUMBER := NULL;
LV_MONTH_11 NUMBER := NULL;
LV_MONTH_12 NUMBER := NULL;
lv_month TB_MONTH := TB_MONTH();
BEGIN
IF CR_PART%ISOPEN THEN
CLOSE CR_PART;
END IF;
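/* Build 12 month windows: lv_month(1) spans the month before last, and
   lv_month(12) the month 13 months back, each from its first to its last day. */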
FOR K IN 1 .. 12 LOOP
lv_month.EXTEND();
lv_month(k).start_date := ADD_MONTHS(TRUNC(SYSDATE, 'MM'), - (K + 1));
lv_month(k).end_date := (ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -K) - 1);
END LOOP;
F := utl_file.fopen(IFMAINT.V_UTL_DIR_NAME, IFMAINT.V_FILE_NAME, 'W');
IF UTL_FILE.IS_OPEN(F) THEN
/*FILE HEADER*/
utl_file.put_line(F,
RPAD('$CUD-', 5, ' ') ||
RPAD(SUBSTR(IFMAINT.V_PANDA_CD, 1, 5), 5, ' ') ||
RPAD('-136-', 5, ' ') || RPAD('000000', 6, ' ') ||
RPAD('-REDFLEX-KA-', 13, ' ') ||
RPAD('00000000-', 9, ' ') ||
RPAD(IFMAINT.V_CDS_SPEC_REL_NUM, 5, ' ') ||
RPAD('CD', 2, ' ') ||
RPAD(TO_CHAR(SYSDATE, 'MMDDYY'), 6, ' ') ||
LPAD(IFSMAINT.V_FILE_NUM, 2, 0) ||
RPAD('-', 1, ' ') || RPAD(' ', 9, ' ') ||
RPAD('-', 1, ' ') || RPAD(' ', 17, ' ') ||
RPAD('CD230', 5, ' ') ||
RPAD(TO_CHAR(SYSDATE, 'MMDDYY'), 6, ' ') ||
LPAD(IFSMAINT.V_FILE_NUM, 2, 0) ||
LPAD(PV_REC_CNT, 8, 0) || RPAD(' ', 5, ' ') ||
RPAD('00000000', 8, ' ') || RPAD('CUD', 3, ' ') ||
RPAD(IFMAINT.V_CDS_SPEC_REL_NUM, 5, ' ') ||
RPAD(IFMAINT.V_GEO_SALES_AREA_CD, 3, ' ') ||
RPAD(IFMAINT.V_FRANCHISE_CD, 2, ' ') ||
RPAD(IFMAINT.V_DSP_REL_NUM, 9, ' ') ||
RPAD('00136REDFLEX', 12, ' ') || RPAD(' ', 1, ' ') ||
RPAD('KA', 2, ' ') || RPAD('000000', 6, ' ') ||
RPAD('00D', 3, ' ') ||
RPAD(IFMAINT.V_VENDOR_ID, 6, ' ') ||
RPAD(IFSMAINT.V_FILE_TYPE, 1, ' ') ||
RPAD('>', 1, ' ') || PV_CRLF);
/*LINE ITEMS*/
OPEN CR_PART;
FETCH CR_PART BULK COLLECT
INTO lv_cursor limit 1000;
FOR I IN lv_cursor.FIRST .. lv_cursor.LAST LOOP
SELECT SUM(A.BILL_QTY)
INTO LV_CURR_MONTH
FROM PD_ISSUE A, PH_ISSUE B
WHERE A.DOC_TYPE IN ('CRI', 'RRI', 'RSI', 'CSI')
AND A.DOC_NUM = B.DOC_NUM
AND B.DOC_DATE BETWEEN TRUNC(SYSDATE, 'MM') AND SYSDATE
AND A.PART_NUM = LV_CURSOR(i).PART_NUM;
FOR J IN 1 .. 12 LOOP
SELECT SUM(A.BILL_QTY)
INTO LV_MONTH_1
FROM PD_ISSUE A, PH_ISSUE B
WHERE A.DOC_TYPE IN ('CRI', 'RRI', 'RSI', 'CSI')
AND A.DOC_NUM = B.DOC_NUM
AND B.DOC_DATE BETWEEN lv_month(J).start_date AND lv_month(J).end_date
AND A.PART_NUM = LV_CURSOR(i).PART_NUM;
END LOOP;
utl_file.put_line(F,
RPAD('IL', 2, ' ') ||
RPAD(TO_CHAR(SYSDATE, 'RRRRMMDD'), 8, ' ') ||
RPAD(LV_CURSOR(I).ENGG_PREFIX, 6, ' ') ||
RPAD(LV_CURSOR(I).ENGG_BASE, 8, ' ') ||
RPAD(LV_CURSOR(I).ENGG_SUFFIX, 6, ' ') ||
LPAD(LV_CURSOR(I).ON_HAND_QTY, 7, 0) ||
LPAD(NVL(LV_CURR_MONTH, 0), 7, 0) ||
LPAD(LV_MONTH_1, 7, 0) || LPAD(LV_MONTH_2, 7, 0) ||
LPAD(LV_MONTH_3, 7, 0) || LPAD(LV_MONTH_4, 7, 0) ||
LPAD(LV_MONTH_5, 7, 0) || LPAD(LV_MONTH_6, 7, 0) ||
LPAD(LV_MONTH_7, 7, 0) || LPAD(LV_MONTH_8, 7, 0) ||
LPAD(LV_MONTH_9, 7, 0) || LPAD(LV_MONTH_10, 7, 0) ||
LPAD(LV_MONTH_11, 7, 0) ||
LPAD(LV_MONTH_12, 7, 0));
IFMAINT.V_REC_CNT := IFMAINT.V_REC_CNT + 1;
END LOOP;
CLOSE CR_PART;
/*TRAILER*/
utl_file.put_line(F,
RPAD('$EOF-', 5, ' ') || RPAD('320R', 4, ' ') ||
RPAD(SUBSTR(IFMAINT.V_PANDA_CD, 1, 5), 5, ' ') ||
RPAD(' ', 5, ' ') ||
RPAD(IFMAINT.V_GEO_SALES_AREA_CD, 3, ' ') ||
RPAD(TO_CHAR(SYSDATE, 'MM-DD-RR'), 6, ' ') ||
LPAD(IFSMAINT.V_FILE_NUM, 2, 0) ||
LPAD(IFMAINT.V_REC_CNT, 8, 0) || 'H' || '>' ||
IFMAINT.V_REC_CNT);
utl_file.fclose(F);
IFMAINT.INSERT_HISTORY;
END IF;
END;
FUNCTION CHECK_FILE_EXISTS RETURN BOOLEAN IS
LB_FILE_EXIST BOOLEAN := FALSE;
LN_FILE_LENGTH NUMBER;
LN_BLOCK_SIZE NUMBER;
BEGIN
UTL_FILE.FGETATTR(IFMAINT.V_UTL_DIR_NAME,
IFMAINT.V_FILE_NAME,
LB_FILE_EXIST,
LN_FILE_LENGTH,
LN_BLOCK_SIZE);
IF LB_FILE_EXIST THEN
RETURN TRUE;
END IF;
RETURN FALSE;
EXCEPTION
WHEN OTHERS THEN
RETURN FALSE;
END;
END;

Try this - fetch inside a loop, and exit on the collection count rather than %NOTFOUND so the last, partly filled batch is still processed:
OPEN CR_PART;
LOOP
FETCH CR_PART BULK COLLECT
INTO lv_cursor LIMIT 1000;
EXIT WHEN lv_cursor.COUNT = 0; /* %NOTFOUND here would skip the last, partly filled batch */
FOR I IN 1 .. lv_cursor.COUNT LOOP
SELECT SUM(A.BILL_QTY)
INTO LV_CURR_MONTH
FROM PD_ISSUE A, PH_ISSUE B
WHERE A.DOC_TYPE IN ('CRI', 'RRI', 'RSI', 'CSI')
AND A.DOC_NUM = B.DOC_NUM
AND B.DOC_DATE BETWEEN TRUNC(SYSDATE, 'MM') AND SYSDATE
AND A.PART_NUM = LV_CURSOR(i).PART_NUM;
FOR J IN 1 .. 12 LOOP
SELECT SUM(A.BILL_QTY)
INTO LV_MONTH_1
FROM PD_ISSUE A, PH_ISSUE B
WHERE A.DOC_TYPE IN ('CRI', 'RRI', 'RSI', 'CSI')
AND A.DOC_NUM = B.DOC_NUM
AND B.DOC_DATE BETWEEN lv_month(J).start_date AND lv_month(J).end_date
AND A.PART_NUM = LV_CURSOR(i).PART_NUM;
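/* NOTE: every pass of this J loop overwrites LV_MONTH_1 and never fills
   LV_MONTH_2..LV_MONTH_12, so the LPADs below emit NULL for those columns;
   write each month's sum into its own slot (or see the single-query sketch
   after this block) instead. */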
END LOOP;
utl_file.put_line(F,
RPAD('IL', 2, ' ') ||
RPAD(TO_CHAR(SYSDATE, 'RRRRMMDD'), 8, ' ') ||
RPAD(LV_CURSOR(I).ENGG_PREFIX, 6, ' ') ||
RPAD(LV_CURSOR(I).ENGG_BASE, 8, ' ') ||
RPAD(LV_CURSOR(I).ENGG_SUFFIX, 6, ' ') ||
LPAD(LV_CURSOR(I).ON_HAND_QTY, 7, 0) ||
LPAD(NVL(LV_CURR_MONTH, 0), 7, 0) ||
LPAD(LV_MONTH_1, 7, 0) || LPAD(LV_MONTH_2, 7, 0) ||
LPAD(LV_MONTH_3, 7, 0) || LPAD(LV_MONTH_4, 7, 0) ||
LPAD(LV_MONTH_5, 7, 0) || LPAD(LV_MONTH_6, 7, 0) ||
LPAD(LV_MONTH_7, 7, 0) || LPAD(LV_MONTH_8, 7, 0) ||
LPAD(LV_MONTH_9, 7, 0) || LPAD(LV_MONTH_10, 7, 0) ||
LPAD(LV_MONTH_11, 7, 0) ||
LPAD(LV_MONTH_12, 7, 0));
IFMAINT.V_REC_CNT := IFMAINT.V_REC_CNT + 1;
END LOOP;
end loop;
CLOSE CR_PART;
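That fixes point (a). For point (b), note that the loop above still issues 13 single-row aggregate queries per part (the current month plus 12 history months) - roughly 455,000 round trips for 35,000 parts - which is where most of the elapsed time goes. Below is a hedged sketch of collapsing them into one conditional-aggregation query for all parts at once; the table and column names come from your code, while the aliases (curr_month, month_1 .. month_12) are illustrative assumptions:
SELECT a.part_num,
       SUM(CASE WHEN b.doc_date >= TRUNC(SYSDATE, 'MM') THEN a.bill_qty END) curr_month,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -2)  THEN a.bill_qty END) month_1,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -3)  THEN a.bill_qty END) month_2,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -4)  THEN a.bill_qty END) month_3,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -5)  THEN a.bill_qty END) month_4,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -6)  THEN a.bill_qty END) month_5,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -7)  THEN a.bill_qty END) month_6,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -8)  THEN a.bill_qty END) month_7,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -9)  THEN a.bill_qty END) month_8,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -10) THEN a.bill_qty END) month_9,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -11) THEN a.bill_qty END) month_10,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -12) THEN a.bill_qty END) month_11,
       SUM(CASE WHEN TRUNC(b.doc_date, 'MM') = ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -13) THEN a.bill_qty END) month_12
FROM   pd_issue a
       JOIN ph_issue b ON b.doc_num = a.doc_num
WHERE  a.doc_type IN ('CRI', 'RRI', 'RSI', 'CSI')
AND    b.doc_date BETWEEN ADD_MONTHS(TRUNC(SYSDATE, 'MM'), -13) AND SYSDATE
GROUP  BY a.part_num;
Bulk collect this once into a collection keyed by PART_NUM (or join it straight to PM_PART_HARSH), and the write loop is left doing only string formatting; the cost drops to one scan of the issue tables instead of 13 indexed probes per part.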

Similar Messages

  • Good bulk collect limit

    I wrote a plsql procedure using bulk collect / forall
The procedure uses bulk collect to fetch from a normal cursor, then I am using FORALL to insert into the
target table. The number of rows is 234965470.
    Question:
    What should ideally be the limit for my bulk collect ?
    According to below, it should be in hundreds
    http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:1583402705463
I put a bulk collect limit of 50000 - it took close to 2 hours.
Then I tried 10000 - just 3 mins shorter than the above time.
But if you commit every 500 rows, then is there not another theory that frequent commits are not good?

    user650888 wrote:
What should ideally be the limit for my bulk collect ?
The answer to that is: what does a bulk collect do?
    And no, it is a fallacy that it makes SQL faster. It does not. Never did. Never will.
    A bulk process reduces the number of calls that need to be made to the SQL engine. In PL/SQL that is called context switching as the PL and SQL engines are tightly coupled.
If, for example, the SQL cursor outputs 1000 rows and single-row fetches are used, 1000 calls or context switches are required for transferring the row data from the one engine to the other. If bulk processing of 100 rows at a time is used, then only 10 context switches are needed. That is a significant reduction in context switches.
If you do a 1000-row bulk collect, only 1 context switch is needed. That is a barely noticeable difference in time between 1 and 10 context switches. So using a bulk limit of 1000 will not improve performance at all versus a 100-row limit.
There is a price for this - bulk processing needs to use very expensive private process memory on the server. Oracle calls this the PGA. Consider the difference in memory between a 100 limit and a 1000 limit: 10x more PGA is needed for a 1000 limit - and no real performance gains result, as there is a negligible reduction in context switches.
    A 100 limit is often bandied around as the bulk collect limit that is the best. That is not really true. If the rows are very small, impact on the PGA is less severe - a higher limit can make sense.
    Likewise, if the rows are very large (100+ large columns fetched), then a 100 limit can make an unreasonable demand on PGA... which will quickly become a bad performance situation when a bunch of clients all execute this code at the same time.
So the sweet spot for a bulk limit typically varies between 10 and 1000.
I put a bulk collect limit of 50000 - it took close to 2 hours. Then I tried 10000 - just 3 mins shorter than the above time.
This is just plain wrong. As you've seen, you are not improving performance at all. In fact, your code can cause severe performance problems on the server due to the high demand being made on private process memory, and the increase in work of the swap daemons that need to keep up with this demand.
Bulk processing DOES NOT INCREASE SQL performance. This is important to understand. The ONLY THING that it does is reduce the number of calls between the SQL and PL/SQL engines.
But if you commit every 500 rows, then is there not another theory that frequent commits are not good?
That is not just plain wrong, but an idiotic approach. A commit is work. Why do you want to add more work to the process and expect that to increase performance?

  • BULK COLLECT LIMIT affects performance in any way?

    Hi,
    I want to know whether BULK COLLECT LIMIT affects performance in any way?
    How does one decide what limit to keep?
    is there a oracle recommendation on the same?

    I agree with Bonist that you should read Tom's article but I am going to disagree, to a minor extent, with Tom's comment about 100 rows.
When developing with BULK COLLECT I always add a parameter to stored procedures that is used to tune the limit clause. Then, when the code goes to unit testing, and at the beginning of integrated unit testing, the value is varied and the results graphed. For final testing the parameter is dropped. The number I hard-code for production is the value at the left side of the top of the bell curve.
What I find is that 100 is sometimes the right number, but I have one app I developed recently where 50,000 was the right number (11gR1). As with almost everything Oracle, the best answer is always that "it depends." In the case where 50K is the best solution the server has 32G of RAM, and everything processed is an SMS message averaging only 79 bytes.
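A minimal sketch of that tunable-limit pattern; the table names big_table / big_table_copy and the procedure name are illustrative assumptions:
CREATE OR REPLACE PROCEDURE copy_big_table(p_limit IN PLS_INTEGER := 100) IS
  CURSOR c_src IS SELECT * FROM big_table;
  TYPE t_rows IS TABLE OF big_table%ROWTYPE;
  l_rows t_rows;
BEGIN
  OPEN c_src;
  LOOP
    FETCH c_src BULK COLLECT INTO l_rows LIMIT p_limit; -- vary p_limit per test run and graph the timings
    EXIT WHEN l_rows.COUNT = 0;
    FORALL i IN 1 .. l_rows.COUNT
      INSERT INTO big_table_copy VALUES l_rows(i);
  END LOOP;
  CLOSE c_src;
  COMMIT; -- one commit for the whole logical unit of work
END copy_big_table;
/
For production, the winning value is then hard-coded as the parameter default, as described above.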

  • Where to put the commit in the FORALL BULK COLLECT LOOP

    Hi,
I have the following LOOP code using FORALL and bulk collect, but didn't know where to put the
'commit':
open f_viewed;
LOOP
    fetch f_viewed bulk collect into f_viewed_rec LIMIT 2000;
    forall i in 1..f_viewed_rec.count
        insert into jwoodman.jw_job_history_112300
        values f_viewed_rec(i);
    --commit; [Can I put this 'commit' here? - Jenny]
    EXIT when f_viewed%NOTFOUND;
END LOOP;
    commit;
    Thanks,
    - Jenny

    mc**** wrote:
Bulk collect is normally used with large data sets. If you have a smaller data set, such as 1000-2000 records, then you cannot get such a performance improvement using bulk collect. (Please see the Oracle documents for this.)
When you update records, Oracle acquires an exclusive lock for them. So if you use a commit inside the loop, it will process the number of records defined by the limit parameter at once and then commit those changes.
That will release all locks acquired by Oracle and also the memory used to keep those uncommitted transactions.
If you use a commit outside the loop,
just assume that you insert 100,000 records: all those records will be stored in Oracle memory and it will affect all other users' performance as well.
Furthermore, if you update 100,000 records, it will hold an exclusive lock on all 100,000 records in addition to the usage of the Oracle memory.
I am using this for a telco application in which we process over 30 million complex records (one row has 234 columns).
When we work with large data sets we do not depend on the Oracle basic rollback function, because when you keep records without a commit it uses Oracle memory and badly slows down all other processes.
Hi mc****,
    What a load of dangerous and inaccurate rubbish to be telling a new Oracle developer. Commit processing should be driven by the logical unit of a transaction. This should hold true whether that transaction involves a few rows or millions. If, and only if, the transaction is so large that it affects the size constraints of the database resources, in particular, rollback or redo space, then you can consider breaking that transaction up to smaller transactions.
    Why is frequent committing undesirable I hear you ask?
    First of all it is hugely wasteful of rollback or redo space. This is because while the database is capable of locking at a row level, redo is written at a block level, which means that if you update, delete or insert a million rows and commit after each individual statement, then that is a million blocks that need to go into redo. As many of these rows will be in the same block, if you instead do these as one transaction, then the same block in redo can be transacted upon, making the operation more efficient. True, locks will be held for longer, but if this is new data being done in batches then users will rarely be inconvenienced. If locking is a problem then I would suggest that you should be looking at how you are doing your processing.
    Secondly, committing brings into play one of the major serialization points in the database, log sync. When a transaction is committed, the log buffer needs to be written to disc. This occurs serially for multiple commits. Each commit has to wait until the commit before has completed. This becomes even more of a bottleneck if you are using Data Guard in SYNC mode, as the commit cycle does not complete until the remote log is notified as written.
    This then brings us two rules of thumb that will always lead a developer in the right direction.
    1. Commit as infrequently as possible, usually at the logical unit of a transaction
    2. When building transactions, first of all seek to do it using straight SQL (CTAS, insert select, update where etc). If this can't be easily achieved, then use PL/SQL bulk operations.
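For contrast, rule 2 applied to Jenny's example above: if the f_viewed cursor were, say, SELECT * FROM job_history_feed (a hypothetical stand-in, since the cursor's query isn't shown), the whole load collapses to straight SQL with a single commit:
INSERT INTO jwoodman.jw_job_history_112300
SELECT * FROM job_history_feed; -- hypothetical stand-in for f_viewed's query
COMMIT;
No collection, no loop, and the logical unit of work commits exactly once.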
    Regards
    Andre

Bulk Collect into is storing fewer rows in the collection when using LIMIT?

I have written the following anonymous PL/SQL block. However, the line dbms_output.put_line(total_tckt_col.LAST) gives me output of 366 (in the DBMS_OUTPUT pane of SQL Developer), which is correct when no limit is set. If the limit is set to 100 in the FETCH statement, then dbms_output.put_line(total_tckt_col.LAST) gives me 66. What am I doing wrong here?
    DECLARE
       CURSOR cur_total_tckt 
       is
       select  t.ticket_id ticket_id, t.created_date created_date, t.created_by created_by, t.ticket_status ticket_status,
      t.last_changed last_changed, h.created_date closed_date
       from n01.cc_ticket_info t
       inner join n01.cc_ticket_status_history h
       on (t.ticket_id = h.ticket_id)
       where t.last_changed >= '6/28/2012 17:28:59' and t.last_changed < (sysdate + interval '1' day);
      type total_tckt_colcn
       is
       TABLE OF cur_total_tckt%rowtype;
      total_tckt_col total_tckt_colcn;
      total_coach_col total_tckt_colcn;
       begin
      total_tckt_col  := total_tckt_colcn ();
      total_coach_col := total_tckt_colcn ();
       OPEN cur_total_tckt;
      loop
       fetch cur_total_tckt bulk collect into total_tckt_col;
    -- fetch cur_total_tckt bulk collect into total_tckt_col limit 100;
       EXIT
       WHEN (cur_total_tckt%NOTFOUND);
       END LOOP ;
       CLOSE cur_total_tckt;  
      dbms_output.put_line(total_tckt_col.LAST); 
       FOR i IN total_tckt_col.first..total_tckt_col.last
      LOOP
      dbms_output.put_line(i);
       END LOOP;
       end;

    Ishan wrote:
Here is a modified version of your code on the standard EMP table in the scott schema.
Did you test it? All you demonstrate is that the last batch has 4 rows. But you print it outside the loop. This way, if the last batch is incomplete (has fewer than limit rows), your loop doesn't process the last batch. Assume you want to print the enames:
    DECLARE
        CURSOR cur_total_tckt
          IS
            select  ename
              from  emp; -- I have a total of 14 records in this table
            type total_tckt_colcn
              is
                TABLE OF cur_total_tckt%rowtype;
            total_tckt_col total_tckt_colcn;
    BEGIN
        total_tckt_col  := total_tckt_colcn ();
        OPEN cur_total_tckt;
        LOOP
          fetch cur_total_tckt bulk collect into total_tckt_col limit 5;
          EXIT WHEN cur_total_tckt%NOTFOUND;
          FOR v_i IN 1..total_tckt_col.count LOOP
            dbms_output.put_line(total_tckt_col(v_i).ename);
          END LOOP;
        END LOOP ;
        CLOSE cur_total_tckt;
    END;
    SMITH
    ALLEN
    WARD
    JONES
    MARTIN
    BLAKE
    CLARK
    SCOTT
    KING
    TURNER
    PL/SQL procedure successfully completed.
    SQL>
As you can see, it didn't print the last batch. Why? Because NOTFOUND is set to true if the exact number of rows you asked to fetch was not fetched. The last batch has 4 rows while the code asks to fetch 5. Therefore, NOTFOUND is set to true and the code exits before processing that last batch. So you have to repeat the processing code outside the loop:
    DECLARE
        CURSOR cur_total_tckt
          IS
            select  ename
              from  emp; -- I have a total of 14 records in this table
            type total_tckt_colcn
              is
                TABLE OF cur_total_tckt%rowtype;
            total_tckt_col total_tckt_colcn;
    BEGIN
        total_tckt_col  := total_tckt_colcn ();
        OPEN cur_total_tckt;
        LOOP
          fetch cur_total_tckt bulk collect into total_tckt_col limit 5;
          EXIT WHEN cur_total_tckt%NOTFOUND;
          FOR v_i IN 1..total_tckt_col.count LOOP
            dbms_output.put_line(total_tckt_col(v_i).ename);
          END LOOP;
        END LOOP ;
        FOR v_i IN 1..total_tckt_col.count LOOP
          dbms_output.put_line(total_tckt_col(v_i).ename);
        END LOOP;
        CLOSE cur_total_tckt;
    END;
    SMITH
    ALLEN
    WARD
    JONES
    MARTIN
    BLAKE
    CLARK
    SCOTT
    KING
    TURNER
    ADAMS
    JAMES
    FORD
    MILLER
    PL/SQL procedure successfully completed.
    SQL>
But you must agree that repeating the processing code twice isn't the best solution. When using BULK COLLECT LIMIT we should exit not on NOTFOUND but rather on collection.count = 0:
    DECLARE
        CURSOR cur_total_tckt
          IS
            select  ename
              from  emp; -- I have a total of 14 records in this table
            type total_tckt_colcn
              is
                TABLE OF cur_total_tckt%rowtype;
            total_tckt_col total_tckt_colcn;
    BEGIN
        total_tckt_col  := total_tckt_colcn ();
        OPEN cur_total_tckt;
        LOOP
          fetch cur_total_tckt bulk collect into total_tckt_col limit 6;
          EXIT WHEN total_tckt_col.count = 0;
          FOR v_i IN 1..total_tckt_col.count LOOP
            dbms_output.put_line(total_tckt_col(v_i).ename);
          END LOOP;
        END LOOP ;
        CLOSE cur_total_tckt;
    END;
    SMITH
    ALLEN
    WARD
    JONES
    MARTIN
    BLAKE
    CLARK
    SCOTT
    KING
    TURNER
    ADAMS
    JAMES
    FORD
    MILLER
    PL/SQL procedure successfully completed.
    SQL>
    SY.

  • BULK COLLECT and FORALL with dynamic INSERT.

    Hello,
I want to apply the BULK COLLECT and FORALL features to an insert statement in my procedure for performance improvements, as it has to insert a huge amount of data.
But the problem is that the insert statement gets generated dynamically and even the table name is found at run-time ... so I am not able to apply the performance tuning concepts.
See the code below:
    PROCEDURE STP_MES_INSERT_GLOBAL_TO_MAIN
      (P_IN_SRC_TABLE_NAME                  VARCHAR2 ,
      P_IN_TRG_TABLE_NAME                  VARCHAR2 ,
      P_IN_ED_TRIG_ALARM_ID                 NUMBER ,
      P_IN_ED_CATG_ID                 NUMBER ,
      P_IN_IS_PIECEID_ALARM IN CHAR,
P_IN_IS_LAST_RECORD IN CHAR)
    IS
          V_START_DATA_ID                 NUMBER;
          V_STOP_DATA_ID                   NUMBER;
          V_FROM_DATA_ID                   NUMBER;
          V_TO_DATA_ID                       NUMBER;
          V_MAX_REC_IN_LOOP              NUMBER := 30000;
          V_QRY1         VARCHAR2(32767);
    BEGIN
                EXECUTE IMMEDIATE 'SELECT MIN(ED_DATA_ID), MAX(ED_DATA_ID) FROM '|| P_IN_SRC_TABLE_NAME INTO V_START_DATA_ID , V_STOP_DATA_ID;
                --DBMS_OUTPUT.PUT_LINE('ORIGINAL START ID := '||V_START_DATA_ID ||' ORIGINAL  STOP ID := ' || V_STOP_DATA_ID);
                V_FROM_DATA_ID := V_START_DATA_ID ;
                IF (V_STOP_DATA_ID - V_START_DATA_ID ) > V_MAX_REC_IN_LOOP THEN
                        V_TO_DATA_ID := V_START_DATA_ID + V_MAX_REC_IN_LOOP;
                ELSE
                       V_TO_DATA_ID := V_STOP_DATA_ID;
                END IF;
              LOOP
                    BEGIN
                 LOOP      
                            V_QRY1 := ' INSERT INTO '||P_IN_TRG_TABLE_NAME||
                            ' SELECT * FROM '||P_IN_SRC_TABLE_NAME ||
                            ' WHERE ED_DATA_ID BETWEEN ' || V_FROM_DATA_ID ||' AND ' || V_TO_DATA_ID;
                    EXECUTE IMMEDIATE V_QRY1;
    commit;
                                     V_FROM_DATA_ID :=  V_TO_DATA_ID + 1;
                            IF  ( V_STOP_DATA_ID - V_TO_DATA_ID > V_MAX_REC_IN_LOOP ) THEN
                                  V_TO_DATA_ID := V_TO_DATA_ID + V_MAX_REC_IN_LOOP;
                            ELSE
                                  V_TO_DATA_ID := V_TO_DATA_ID + (V_STOP_DATA_ID - V_TO_DATA_ID);
                            END IF;
                    EXCEPTION
WHEN OTHERS THEN .............
.................... so on
Now you can observe here that P_IN_SRC_TABLE_NAME is the source table name, which we get as a parameter at run-time. I have used 2 tables in the insert statement: P_IN_TRG_TABLE_NAME (into which I have to insert data) and P_IN_SRC_TABLE_NAME (from which I have to insert data).
      V_QRY1 := ' INSERT INTO '||P_IN_TRG_TABLE_NAME||
                            ' SELECT * FROM '||P_IN_SRC_TABLE_NAME ||
                            ' WHERE ED_DATA_ID BETWEEN ' || V_FROM_DATA_ID ||' AND ' || V_TO_DATA_ID;
EXECUTE IMMEDIATE V_QRY1;
Now when I apply the bulk collect and forall feature I am facing the out-of-scope problem .... see the code below ...
    BEGIN
                EXECUTE IMMEDIATE 'SELECT MIN(ED_DATA_ID), MAX(ED_DATA_ID) FROM '|| P_IN_SRC_TABLE_NAME INTO V_START_DATA_ID , V_STOP_DATA_ID;
                --DBMS_OUTPUT.PUT_LINE('ORIGINAL START ID := '||V_START_DATA_ID ||' ORIGINAL  STOP ID := ' || V_STOP_DATA_ID);
                V_FROM_DATA_ID := V_START_DATA_ID ;
                IF (V_STOP_DATA_ID - V_START_DATA_ID ) > V_MAX_REC_IN_LOOP THEN
                        V_TO_DATA_ID := V_START_DATA_ID + V_MAX_REC_IN_LOOP;
                ELSE
                       V_TO_DATA_ID := V_STOP_DATA_ID;
                END IF;
              LOOP
                    DECLARE
                     TYPE TRG_TABLE_TYPE IS TABLE OF P_IN_SRC_TABLE_NAME%ROWTYPE;
                     V_TRG_TABLE_TYPE TRG_TABLE_TYPE;
                     CURSOR TRG_TAB_CUR IS
                     SELECT * FROM P_IN_SRC_TABLE_NAME
                     WHERE ED_DATA_ID BETWEEN V_FROM_DATA_ID AND V_TO_DATA_ID;
                     V_QRY1 varchar2(32767);
                    BEGIN
                    OPEN TRG_TAB_CUR;
                    LOOP
                    FETCH TRG_TAB_CUR BULK COLLECT INTO V_TRG_TABLE_TYPE LIMIT 30000;
                    FORALL I IN 1..V_TRG_TABLE_TYPE.COUNT
                    V_QRY1 := ' INSERT INTO '||P_IN_TRG_TABLE_NAME||' VALUES V_TRG_TABLE_TYPE(I);'
                    EXECUTE IMMEDIATE V_QRY1;
                    EXIT WHEN TRG_TAB_CUR%NOTFOUND;
                    END LOOP;
                    CLOSE TRG_TAB_CUR;
                            V_FROM_DATA_ID :=  V_TO_DATA_ID + 1;
                            IF  ( V_STOP_DATA_ID - V_TO_DATA_ID > V_MAX_REC_IN_LOOP ) THEN
                                  V_TO_DATA_ID := V_TO_DATA_ID + V_MAX_REC_IN_LOOP;
                            ELSE
                                  V_TO_DATA_ID := V_TO_DATA_ID + (V_STOP_DATA_ID - V_TO_DATA_ID);
                            END IF;
                    EXCEPTION
                             WHEN OTHERS THEN.........so on
But the above code is not helping me. What am I doing wrong? How can I tune this dynamically generated statement to use bulk collect for better performance?
    Thanks in Advance !!!!

    Hello,
a table name cannot be bound as a parameter in SQL; this won't compile:
EXECUTE IMMEDIATE ' INSERT INTO :1 VALUES ......
USING P_IN_TRG_TABLE_NAME ...
but this should work:
EXECUTE IMMEDIATE ' INSERT INTO ' || P_IN_TRG_TABLE_NAME || ' VALUES ......
You cannot declare a type that is based on a table whose name is in a variable.
PL/SQL is a strongly typed language; a type must be known at compile time, so code like this is not allowed:
    PROCEDURE xx( src_table_name varchar2 )
    DECLARE
       TYPE tab IS TABLE OF src_table_name%ROWTYPE;
...
This can be done by creating one big dynamic SQL block - see the example below (tested on Oracle 10 XE - this is a slightly simplified version of your procedure):
    CREATE OR REPLACE
    PROCEDURE stp1(
      p_in_src_table_name                  VARCHAR2 ,
      p_in_trg_table_name                  VARCHAR2 ,
      v_from_data_id     NUMBER := 100,
v_to_data_id       NUMBER := 100000)
    IS
    BEGIN
      EXECUTE IMMEDIATE q'{
      DECLARE
         TYPE trg_table_type IS TABLE OF }' || p_in_src_table_name || q'{%ROWTYPE;
         V_TRG_TABLE_TYPE TRG_TABLE_TYPE;
         CURSOR TRG_TAB_CUR IS
         SELECT * FROM }' || p_in_src_table_name ||
         q'{ WHERE ED_DATA_ID BETWEEN :V_FROM_DATA_ID AND :V_TO_DATA_ID;
      BEGIN
          OPEN TRG_TAB_CUR;
          LOOP
                FETCH TRG_TAB_CUR BULK COLLECT INTO V_TRG_TABLE_TYPE LIMIT 30000;
                FORALL I IN 1 .. V_TRG_TABLE_TYPE.COUNT
                INSERT INTO }' || p_in_trg_table_name || q'{ VALUES V_TRG_TABLE_TYPE( I );
                EXIT WHEN TRG_TAB_CUR%NOTFOUND;
          END LOOP;
          CLOSE TRG_TAB_CUR;
      END; }'
      USING v_from_data_id, v_to_data_id;
      COMMIT;
END;
But this probably won't give any performance improvements. Bulk collect and forall can give performance improvements when there is a DML operation inside a loop,
    and this one single DML operates on only one record or relatively small number of records, and this DML is repeated many many times in the loop.
I guess that your code is the opposite of this - it contains an insert statement that operates on many records (one single insert ~ 30000 records),
and you are trying to replace it with bulk collect/forall - INSERT INTO ... SELECT FROM will almost always be faster than bulk collect/forall.
Look at a simple test - below is a procedure that uses INSERT ... SELECT:
    CREATE OR REPLACE
    PROCEDURE stp(
      p_in_src_table_name                  VARCHAR2 ,
      p_in_trg_table_name                  VARCHAR2 ,
      v_from_data_id     NUMBER := 100,
v_to_data_id       NUMBER := 100000)
    IS
    V_QRY1 VARCHAR2(32767);
    BEGIN
          V_QRY1 := ' INSERT INTO '||   P_IN_TRG_TABLE_NAME ||
                    ' SELECT * FROM '|| P_IN_SRC_TABLE_NAME ||
                    ' WHERE ed_data_id BETWEEN :f AND :t ';
          EXECUTE IMMEDIATE V_QRY1
          USING V_FROM_DATA_ID, V_TO_DATA_ID;
          COMMIT;
    END;
/
and we can compare both procedures:
    SQL> CREATE TABLE test333
      2  AS SELECT level ed_data_id ,
      3            'XXX ' || LEVEL x,
      4            'YYY ' || 2 * LEVEL y
      5  FROM dual
      6  CONNECT BY LEVEL <= 1000000;
    Table created.
    SQL> CREATE TABLE test333_dst AS
      2  SELECT * FROM test333 WHERE 1 = 0;
    Table created.
    SQL> set timing on
    SQL> ed
    Wrote file afiedt.buf
      1  BEGIN
      2     FOR i IN 1 .. 100 LOOP
      3        stp1( 'test333', 'test333_dst', 1000, 31000 );
      4     END LOOP;
      5* END;
    SQL> /
    PL/SQL procedure successfully completed.
    Elapsed: 00:00:22.12
    SQL> ed
    Wrote file afiedt.buf
      1  BEGIN
      2     FOR i IN 1 .. 100 LOOP
      3        stp( 'test333', 'test333_dst', 1000, 31000 );
      4     END LOOP;
      5* END;
    SQL> /
    PL/SQL procedure successfully completed.
    Elapsed: 00:00:14.86without bulk collect ~ 15 sec.
    bulk collect version ~ 22 sec. .... 7 sec longer / 15 sec. = about 45% performance decrease.

  • Can I use Bulk Collect results as input parameter for another cursor

MUSIC            ==> remote MUSIC_DB database; the MUSIC table has 60 million rows
PRICE_DATA ==> remote PRICING_DB database; the PRICE_DATA table has 1 billion rows
These two tables once existed in the same database, but the size of the database exceeded the available hardware size and hardware budget, so the PRICE_DATA table was moved to another Oracle database. I need to create a single report that combines data from both of these tables, and a distributed join with the DRIVING_SITE hint will not work because both tables are too large to push to one DRIVING_SITE location, so I wrote this PL/SQL block to process in small batches.
QUESTION: how can I use bulk collect from one cursor and pass that bulk-collected information as input to a second cursor without specifically listing each cell of the PL/SQL bulk collection? See the sample pseudo-code below; I am trying to determine a more efficient way to code this than hard-coding 100 parameter names into the 2nd cursor.
NOTE: below is truly pseudo-code; I had to change the names of everything to adhere to an NDA. It works and is fast enough for my purposes, but if I want to change from 100 input parameters to 200, I have to add more hard-coded values. There has got to be a better way.
    DECLARE
         -- define cursor that retrieves distinct SONG_IDs from MUSIC table in remote music database
         CURSOR C_CURRENT_MUSIC
         IS
        select distinct SONG_ID
        from MUSIC@MUSIC_DB
        where PRODUCTION_RELEASE=1
     /*  define a parameterized cursor that accepts 100 SONG_IDs and retrieves
          required pricing information */
         CURSOR C_get_music_price_data
                   P_SONG_ID_001 NUMBER, P_SONG_ID_002 NUMBER, P_SONG_ID_003 NUMBER, P_SONG_ID_004 NUMBER, P_SONG_ID_005 NUMBER, P_SONG_ID_006 NUMBER, P_SONG_ID_007 NUMBER, P_SONG_ID_008 NUMBER, P_SONG_ID_009 NUMBER, P_SONG_ID_010 NUMBER,
                   P_SONG_ID_011 NUMBER, P_SONG_ID_012 NUMBER, P_SONG_ID_013 NUMBER, P_SONG_ID_014 NUMBER, P_SONG_ID_015 NUMBER, P_SONG_ID_016 NUMBER, P_SONG_ID_017 NUMBER, P_SONG_ID_018 NUMBER, P_SONG_ID_019 NUMBER, P_SONG_ID_020 NUMBER,
                   P_SONG_ID_021 NUMBER, P_SONG_ID_022 NUMBER, P_SONG_ID_023 NUMBER, P_SONG_ID_024 NUMBER, P_SONG_ID_025 NUMBER, P_SONG_ID_026 NUMBER, P_SONG_ID_027 NUMBER, P_SONG_ID_028 NUMBER, P_SONG_ID_029 NUMBER, P_SONG_ID_030 NUMBER,
                   P_SONG_ID_031 NUMBER, P_SONG_ID_032 NUMBER, P_SONG_ID_033 NUMBER, P_SONG_ID_034 NUMBER, P_SONG_ID_035 NUMBER, P_SONG_ID_036 NUMBER, P_SONG_ID_037 NUMBER, P_SONG_ID_038 NUMBER, P_SONG_ID_039 NUMBER, P_SONG_ID_040 NUMBER,
                   P_SONG_ID_041 NUMBER, P_SONG_ID_042 NUMBER, P_SONG_ID_043 NUMBER, P_SONG_ID_044 NUMBER, P_SONG_ID_045 NUMBER, P_SONG_ID_046 NUMBER, P_SONG_ID_047 NUMBER, P_SONG_ID_048 NUMBER, P_SONG_ID_049 NUMBER, P_SONG_ID_050 NUMBER,
                   P_SONG_ID_051 NUMBER, P_SONG_ID_052 NUMBER, P_SONG_ID_053 NUMBER, P_SONG_ID_054 NUMBER, P_SONG_ID_055 NUMBER, P_SONG_ID_056 NUMBER, P_SONG_ID_057 NUMBER, P_SONG_ID_058 NUMBER, P_SONG_ID_059 NUMBER, P_SONG_ID_060 NUMBER,
                   P_SONG_ID_061 NUMBER, P_SONG_ID_062 NUMBER, P_SONG_ID_063 NUMBER, P_SONG_ID_064 NUMBER, P_SONG_ID_065 NUMBER, P_SONG_ID_066 NUMBER, P_SONG_ID_067 NUMBER, P_SONG_ID_068 NUMBER, P_SONG_ID_069 NUMBER, P_SONG_ID_070 NUMBER,
                   P_SONG_ID_071 NUMBER, P_SONG_ID_072 NUMBER, P_SONG_ID_073 NUMBER, P_SONG_ID_074 NUMBER, P_SONG_ID_075 NUMBER, P_SONG_ID_076 NUMBER, P_SONG_ID_077 NUMBER, P_SONG_ID_078 NUMBER, P_SONG_ID_079 NUMBER, P_SONG_ID_080 NUMBER,
                   P_SONG_ID_081 NUMBER, P_SONG_ID_082 NUMBER, P_SONG_ID_083 NUMBER, P_SONG_ID_084 NUMBER, P_SONG_ID_085 NUMBER, P_SONG_ID_086 NUMBER, P_SONG_ID_087 NUMBER, P_SONG_ID_088 NUMBER, P_SONG_ID_089 NUMBER, P_SONG_ID_090 NUMBER,
                   P_SONG_ID_091 NUMBER, P_SONG_ID_092 NUMBER, P_SONG_ID_093 NUMBER, P_SONG_ID_094 NUMBER, P_SONG_ID_095 NUMBER, P_SONG_ID_096 NUMBER, P_SONG_ID_097 NUMBER, P_SONG_ID_098 NUMBER, P_SONG_ID_099 NUMBER, P_SONG_ID_100 NUMBER
         IS
         select
         from PRICE_DATA@PRICING_DB
         where COUNTRY = 'USA'
         and START_DATE <= sysdate
         and END_DATE > sysdate
         and vpc.SONG_ID IN
                   P_SONG_ID_001 ,P_SONG_ID_002 ,P_SONG_ID_003 ,P_SONG_ID_004 ,P_SONG_ID_005 ,P_SONG_ID_006 ,P_SONG_ID_007 ,P_SONG_ID_008 ,P_SONG_ID_009 ,P_SONG_ID_010,
                   P_SONG_ID_011 ,P_SONG_ID_012 ,P_SONG_ID_013 ,P_SONG_ID_014 ,P_SONG_ID_015 ,P_SONG_ID_016 ,P_SONG_ID_017 ,P_SONG_ID_018 ,P_SONG_ID_019 ,P_SONG_ID_020,
                   P_SONG_ID_021 ,P_SONG_ID_022 ,P_SONG_ID_023 ,P_SONG_ID_024 ,P_SONG_ID_025 ,P_SONG_ID_026 ,P_SONG_ID_027 ,P_SONG_ID_028 ,P_SONG_ID_029 ,P_SONG_ID_030,
                   P_SONG_ID_031 ,P_SONG_ID_032 ,P_SONG_ID_033 ,P_SONG_ID_034 ,P_SONG_ID_035 ,P_SONG_ID_036 ,P_SONG_ID_037 ,P_SONG_ID_038 ,P_SONG_ID_039 ,P_SONG_ID_040,
                   P_SONG_ID_041 ,P_SONG_ID_042 ,P_SONG_ID_043 ,P_SONG_ID_044 ,P_SONG_ID_045 ,P_SONG_ID_046 ,P_SONG_ID_047 ,P_SONG_ID_048 ,P_SONG_ID_049 ,P_SONG_ID_050,
                   P_SONG_ID_051 ,P_SONG_ID_052 ,P_SONG_ID_053 ,P_SONG_ID_054 ,P_SONG_ID_055 ,P_SONG_ID_056 ,P_SONG_ID_057 ,P_SONG_ID_058 ,P_SONG_ID_059 ,P_SONG_ID_060,
                   P_SONG_ID_061 ,P_SONG_ID_062 ,P_SONG_ID_063 ,P_SONG_ID_064 ,P_SONG_ID_065 ,P_SONG_ID_066 ,P_SONG_ID_067 ,P_SONG_ID_068 ,P_SONG_ID_069 ,P_SONG_ID_070,
                   P_SONG_ID_071 ,P_SONG_ID_072 ,P_SONG_ID_073 ,P_SONG_ID_074 ,P_SONG_ID_075 ,P_SONG_ID_076 ,P_SONG_ID_077 ,P_SONG_ID_078 ,P_SONG_ID_079 ,P_SONG_ID_080,
                   P_SONG_ID_081 ,P_SONG_ID_082 ,P_SONG_ID_083 ,P_SONG_ID_084 ,P_SONG_ID_085 ,P_SONG_ID_086 ,P_SONG_ID_087 ,P_SONG_ID_088 ,P_SONG_ID_089 ,P_SONG_ID_090,
                   P_SONG_ID_091 ,P_SONG_ID_092 ,P_SONG_ID_093 ,P_SONG_ID_094 ,P_SONG_ID_095 ,P_SONG_ID_096 ,P_SONG_ID_097 ,P_SONG_ID_098 ,P_SONG_ID_099 ,P_SONG_ID_100
         group by
               vpc.SONG_ID
              ,vpc.STOREFRONT_ID
         TYPE SONG_ID_TYPE IS TABLE OF MUSIC@MUSIC_DB%TYPE INDEX BY BINARY_INTEGER;
         V_SONG_ID_ARRAY                         SONG_ID_TYPE                     ;
         v_commit_counter           NUMBER := 0;
    BEGIN
         /* open cursor you intent to bulk collect from */
         OPEN C_CURRENT_MUSIC;
         LOOP
              /* in batches of 100, bulk collect ADAM_ID mapped TMS_IDENTIFIER into PLSQL table or records */
              FETCH C_CURRENT_MUSIC BULK COLLECT INTO V_SONG_ID_ARRAY LIMIT 100;
                   EXIT WHEN V_SONG_ID_ARRAY.COUNT = 0;
                   /* to avoid NO DATA FOUND error when pass 100 parameters to OPEN cursor, if the arrary
                      is not fully populated to 100, pad the array with nulls to fill up to 100 cells. */
                   IF (V_SONG_ID_ARRAY.COUNT >=1 and V_SONG_ID_ARRAY.COUNT <> 100) THEN
                        FOR j IN V_SONG_ID_ARRAY.COUNT+1..100 LOOP
                             V_SONG_ID_ARRAY(j) := null;
                        END LOOP;
                   END IF;
              /* pass a batch of 100 to cursor that get price information per SONG_ID and STOREFRONT_ID */
              FOR j IN C_get_music_price_data
                        V_SONG_ID_ARRAY(1) ,V_SONG_ID_ARRAY(2) ,V_SONG_ID_ARRAY(3) ,V_SONG_ID_ARRAY(4) ,V_SONG_ID_ARRAY(5) ,V_SONG_ID_ARRAY(6) ,V_SONG_ID_ARRAY(7) ,V_SONG_ID_ARRAY(8) ,V_SONG_ID_ARRAY(9) ,V_SONG_ID_ARRAY(10) ,
                        V_SONG_ID_ARRAY(11) ,V_SONG_ID_ARRAY(12) ,V_SONG_ID_ARRAY(13) ,V_SONG_ID_ARRAY(14) ,V_SONG_ID_ARRAY(15) ,V_SONG_ID_ARRAY(16) ,V_SONG_ID_ARRAY(17) ,V_SONG_ID_ARRAY(18) ,V_SONG_ID_ARRAY(19) ,V_SONG_ID_ARRAY(20) ,
                        V_SONG_ID_ARRAY(21) ,V_SONG_ID_ARRAY(22) ,V_SONG_ID_ARRAY(23) ,V_SONG_ID_ARRAY(24) ,V_SONG_ID_ARRAY(25) ,V_SONG_ID_ARRAY(26) ,V_SONG_ID_ARRAY(27) ,V_SONG_ID_ARRAY(28) ,V_SONG_ID_ARRAY(29) ,V_SONG_ID_ARRAY(30) ,
                        V_SONG_ID_ARRAY(31) ,V_SONG_ID_ARRAY(32) ,V_SONG_ID_ARRAY(33) ,V_SONG_ID_ARRAY(34) ,V_SONG_ID_ARRAY(35) ,V_SONG_ID_ARRAY(36) ,V_SONG_ID_ARRAY(37) ,V_SONG_ID_ARRAY(38) ,V_SONG_ID_ARRAY(39) ,V_SONG_ID_ARRAY(40) ,
                        V_SONG_ID_ARRAY(41) ,V_SONG_ID_ARRAY(42) ,V_SONG_ID_ARRAY(43) ,V_SONG_ID_ARRAY(44) ,V_SONG_ID_ARRAY(45) ,V_SONG_ID_ARRAY(46) ,V_SONG_ID_ARRAY(47) ,V_SONG_ID_ARRAY(48) ,V_SONG_ID_ARRAY(49) ,V_SONG_ID_ARRAY(50) ,
                        V_SONG_ID_ARRAY(51) ,V_SONG_ID_ARRAY(52) ,V_SONG_ID_ARRAY(53) ,V_SONG_ID_ARRAY(54) ,V_SONG_ID_ARRAY(55) ,V_SONG_ID_ARRAY(56) ,V_SONG_ID_ARRAY(57) ,V_SONG_ID_ARRAY(58) ,V_SONG_ID_ARRAY(59) ,V_SONG_ID_ARRAY(60) ,
                        V_SONG_ID_ARRAY(61) ,V_SONG_ID_ARRAY(62) ,V_SONG_ID_ARRAY(63) ,V_SONG_ID_ARRAY(64) ,V_SONG_ID_ARRAY(65) ,V_SONG_ID_ARRAY(66) ,V_SONG_ID_ARRAY(67) ,V_SONG_ID_ARRAY(68) ,V_SONG_ID_ARRAY(69) ,V_SONG_ID_ARRAY(70) ,
                        V_SONG_ID_ARRAY(71) ,V_SONG_ID_ARRAY(72) ,V_SONG_ID_ARRAY(73) ,V_SONG_ID_ARRAY(74) ,V_SONG_ID_ARRAY(75) ,V_SONG_ID_ARRAY(76) ,V_SONG_ID_ARRAY(77) ,V_SONG_ID_ARRAY(78) ,V_SONG_ID_ARRAY(79) ,V_SONG_ID_ARRAY(80) ,
                        V_SONG_ID_ARRAY(81) ,V_SONG_ID_ARRAY(82) ,V_SONG_ID_ARRAY(83) ,V_SONG_ID_ARRAY(84) ,V_SONG_ID_ARRAY(85) ,V_SONG_ID_ARRAY(86) ,V_SONG_ID_ARRAY(87) ,V_SONG_ID_ARRAY(88) ,V_SONG_ID_ARRAY(89) ,V_SONG_ID_ARRAY(90) ,
                        V_SONG_ID_ARRAY(91) ,V_SONG_ID_ARRAY(92) ,V_SONG_ID_ARRAY(93) ,V_SONG_ID_ARRAY(94) ,V_SONG_ID_ARRAY(95) ,V_SONG_ID_ARRAY(96) ,V_SONG_ID_ARRAY(97) ,V_SONG_ID_ARRAY(98) ,V_SONG_ID_ARRAY(99) ,V_SONG_ID_ARRAY(100)        
              LOOP
               /* do stuff with data from Song and Pricing Database coming from the two
                    separate cursors, then continue processing more rows... */
          END LOOP;
              /* commit after each batch of 100 SONG_IDs is processed */        
              COMMIT;
              EXIT WHEN C_CURRENT_MUSIC%NOTFOUND;  -- exit when there are no more rows to fetch from cursor
         END LOOP; -- bulk fetching loop
         CLOSE C_CURRENT_MUSIC; -- close cursor that was used in bulk collection
         /* commit rows */
         COMMIT; -- commit any remaining uncommitted data.
    END;

I've got a problem when passing a VARRAY of numbers as a parameter to a remote cursor: it takes a super long time to run, and sometimes doesn't finish even after an hour has passed.
Continuing with my example in the original entry, I replaced the bulk collect into a PL/SQL table collection with a VARRAY, and I bulk collect into the VARRAY. This is fast, and I know it works because I can DBMS_OUTPUT.PUT_LINE cells of the VARRAY, so I know it is getting populated correctly. However, when I pass the VARRAY containing 100 cells populated with SONG_IDs as a parameter to the cursor, execution time is over an hour when I am expecting a few seconds.
The code example below strips the problem down to its raw details: I skip the bulk collect and just manually populate a VARRAY with 100 SONG_ID values, then try to pass it as a parameter to a cursor, but the execution time of the cursor is unexpectedly long, over 30 minutes, sometimes longer, when I am expecting seconds.
IMPORTANT: If I take the same 100 SONG_IDs and place them directly in the cursor query's where IN clause, the SQL runs in under 5 seconds and returns the result. Also, if I pass the 100 SONG_IDs as individual cells of a PL/SQL table collection, it also runs fast.
I thought that since the VARRAY is used via a select subquery it is queried locally while the cursor is remote, and that I had a distributed-query problem on my hands, so I put in the DRIVING_SITE hint to attempt to force the result of the query against the VARRAY to go to the remote server so the rest of the query would run there before returning the result, but that didn't work either; I still got a slow response.
Is something wrong with my code, or am I running into an Oracle problem that may require support to resolve?
    DECLARE
     /*  define a parameterized cursor that accepts XXX number of input SONG_IDs and
      retrieves required pricing information */
     CURSOR C_get_music_price_data
       ( p_array_song_ids SYS.ODCInumberList )
     IS
         select  /*+DRIVING_SITE(pd) */
  count(distinct pd.EVE_ID)
         from PRICE_DATA@PRICING_DB pd
         where pd.COUNTRY = 'USA'
         and pd.START_DATE <= sysdate
         and pd.END_DATE > sysdate
     and pd.SONG_ID IN
          ( select column_value from table(p_array_song_ids) )
         group by
               pd.SONG_ID
          ,pd.STOREFRONT_ID;
      V_ARRAY_SONG_IDS SYS.ODCInumberList := SYS.ODCInumberList();    
    BEGIN
    V_ARRAY_SONG_IDS.EXTEND(100);
    V_ARRAY_SONG_IDS(  1 ) := 31135  ;
    V_ARRAY_SONG_IDS(  2 ) := 31140   ;
    V_ARRAY_SONG_IDS(  3 ) := 31142   ;
    V_ARRAY_SONG_IDS(  4 ) := 31144   ;
    V_ARRAY_SONG_IDS(  5 ) := 31146   ;
    V_ARRAY_SONG_IDS(  6 ) := 31148   ;
    V_ARRAY_SONG_IDS(  7 ) := 31150   ;
    V_ARRAY_SONG_IDS(  8 ) := 31152   ;
    V_ARRAY_SONG_IDS(  9 ) := 31154   ;
    V_ARRAY_SONG_IDS( 10 ) := 31156   ;
    V_ARRAY_SONG_IDS( 11 ) := 31158   ;
    V_ARRAY_SONG_IDS( 12 ) := 31160   ;
    V_ARRAY_SONG_IDS( 13 ) := 33598   ;
    V_ARRAY_SONG_IDS( 14 ) := 33603   ;
    V_ARRAY_SONG_IDS( 15 ) := 33605   ;
    V_ARRAY_SONG_IDS( 16 ) := 33607   ;
    V_ARRAY_SONG_IDS( 17 ) := 33609   ;
    V_ARRAY_SONG_IDS( 18 ) := 33611   ;
    V_ARRAY_SONG_IDS( 19 ) := 33613   ;
    V_ARRAY_SONG_IDS( 20 ) := 33615   ;
    V_ARRAY_SONG_IDS( 21 ) := 33617   ;
    V_ARRAY_SONG_IDS( 22 ) := 33630   ;
    V_ARRAY_SONG_IDS( 23 ) := 33632   ;
    V_ARRAY_SONG_IDS( 24 ) := 33636   ;
    V_ARRAY_SONG_IDS( 25 ) := 33638   ;
    V_ARRAY_SONG_IDS( 26 ) := 33640   ;
    V_ARRAY_SONG_IDS( 27 ) := 33642   ;
    V_ARRAY_SONG_IDS( 28 ) := 33644   ;
    V_ARRAY_SONG_IDS( 29 ) := 33646   ;
    V_ARRAY_SONG_IDS( 30 ) := 33648   ;
    V_ARRAY_SONG_IDS( 31 ) := 33662   ;
    V_ARRAY_SONG_IDS( 32 ) := 33667   ;
    V_ARRAY_SONG_IDS( 33 ) := 33669   ;
    V_ARRAY_SONG_IDS( 34 ) := 33671   ;
    V_ARRAY_SONG_IDS( 35 ) := 33673   ;
    V_ARRAY_SONG_IDS( 36 ) := 33675   ;
    V_ARRAY_SONG_IDS( 37 ) := 33677   ;
    V_ARRAY_SONG_IDS( 38 ) := 33679   ;
    V_ARRAY_SONG_IDS( 39 ) := 33681   ;
    V_ARRAY_SONG_IDS( 40 ) := 33683   ;
    V_ARRAY_SONG_IDS( 41 ) := 33685   ;
    V_ARRAY_SONG_IDS( 42 ) := 33700   ;
    V_ARRAY_SONG_IDS( 43 ) := 33702   ;
    V_ARRAY_SONG_IDS( 44 ) := 33704   ;
    V_ARRAY_SONG_IDS( 45 ) := 33706   ;
    V_ARRAY_SONG_IDS( 46 ) := 33708   ;
    V_ARRAY_SONG_IDS( 47 ) := 33710   ;
    V_ARRAY_SONG_IDS( 48 ) := 33712   ;
    V_ARRAY_SONG_IDS( 49 ) := 33723   ;
    V_ARRAY_SONG_IDS( 50 ) := 33725   ;
    V_ARRAY_SONG_IDS( 51 ) := 33727   ;
    V_ARRAY_SONG_IDS( 52 ) := 33729   ;
    V_ARRAY_SONG_IDS( 53 ) := 33731   ;
    V_ARRAY_SONG_IDS( 54 ) := 33733   ;
    V_ARRAY_SONG_IDS( 55 ) := 33735   ;
    V_ARRAY_SONG_IDS( 56 ) := 33737   ;
    V_ARRAY_SONG_IDS( 57 ) := 33749   ;
    V_ARRAY_SONG_IDS( 58 ) := 33751   ;
    V_ARRAY_SONG_IDS( 59 ) := 33753   ;
    V_ARRAY_SONG_IDS( 60 ) := 33755   ;
    V_ARRAY_SONG_IDS( 61 ) := 33757   ;
    V_ARRAY_SONG_IDS( 62 ) := 33759   ;
    V_ARRAY_SONG_IDS( 63 ) := 33761   ;
    V_ARRAY_SONG_IDS( 64 ) := 33763   ;
    V_ARRAY_SONG_IDS( 65 ) := 33775   ;
    V_ARRAY_SONG_IDS( 66 ) := 33777   ;
    V_ARRAY_SONG_IDS( 67 ) := 33779   ;
    V_ARRAY_SONG_IDS( 68 ) := 33781   ;
    V_ARRAY_SONG_IDS( 69 ) := 33783   ;
    V_ARRAY_SONG_IDS( 70 ) := 33785   ;
    V_ARRAY_SONG_IDS( 71 ) := 33787   ;
    V_ARRAY_SONG_IDS( 72 ) := 33789   ;
    V_ARRAY_SONG_IDS( 73 ) := 33791   ;
    V_ARRAY_SONG_IDS( 74 ) := 33793   ;
    V_ARRAY_SONG_IDS( 75 ) := 33807   ;
    V_ARRAY_SONG_IDS( 76 ) := 33809   ;
    V_ARRAY_SONG_IDS( 77 ) := 33811   ;
    V_ARRAY_SONG_IDS( 78 ) := 33813   ;
    V_ARRAY_SONG_IDS( 79 ) := 33815   ;
    V_ARRAY_SONG_IDS( 80 ) := 33817   ;
    V_ARRAY_SONG_IDS( 81 ) := 33819   ;
    V_ARRAY_SONG_IDS( 82 ) := 33821   ;
    V_ARRAY_SONG_IDS( 83 ) := 33823   ;
    V_ARRAY_SONG_IDS( 84 ) := 33825   ;
    V_ARRAY_SONG_IDS( 85 ) := 33839   ;
    V_ARRAY_SONG_IDS( 86 ) := 33844   ;
    V_ARRAY_SONG_IDS( 87 ) := 33846   ;
    V_ARRAY_SONG_IDS( 88 ) := 33848   ;
    V_ARRAY_SONG_IDS( 89 ) := 33850   ;
    V_ARRAY_SONG_IDS( 90 ) := 33852   ;
    V_ARRAY_SONG_IDS( 91 ) := 33854   ;
    V_ARRAY_SONG_IDS( 92 ) := 33856   ;
    V_ARRAY_SONG_IDS( 93 ) := 33858   ;
    V_ARRAY_SONG_IDS( 94 ) := 33860   ;
    V_ARRAY_SONG_IDS( 95 ) := 33874   ;
    V_ARRAY_SONG_IDS( 96 ) := 33879   ;
    V_ARRAY_SONG_IDS( 97 ) := 33881   ;
    V_ARRAY_SONG_IDS( 98 ) := 33883   ;
    V_ARRAY_SONG_IDS( 99 ) := 33885   ;
    V_ARRAY_SONG_IDS(100 ) := 33889  ;
    /* do stuff with data from Song and Pricing Database coming from the two
       separate cursors, then continue processing more rows... */
  FOR i IN C_get_music_price_data( v_array_song_ids ) LOOP
     NULL; -- (this is the loop where I pass in v_array_song_ids,
           --  populated with only 100 cells, and it runs forever)
  END LOOP;
    END;

  • Generic bulk collect function

I've run into a scenario in Forms (10g) where some of the embedded PL/SQL could benefit from the use of bulk collects. Now, as I've just found out, Forms doesn't like bulks in its embedded code; it returns a 'feature not supported in client...' error.
I attempted to write a function in the db (10g), to be called from the Form: accept a ref cursor parameter, fetch the cursor contents using bulk collect into an array, then return the filled array to the Form. (It may not be performant either, but I'll never know because I didn't get it working.)
However, fatally, I can't create a record array in the function without knowing the structure of the cursor. I've googled, and not found any way to do it.
So, before I give up on it and go for a different solution, I just thought I'd check here to see if anyone else has attempted this. Or am I right in thinking it's just not doable?
    thanks in advance.

Not a good idea to bulk collect in PL/SQL on behalf of an external client like Forms.
A bulk collect does a single thing only - it reduces the context switching between the PL/SQL and SQL engines. If you fetch 1000 rows one at a time, then that fetch statement will cause 1000 context switches, as PL/SQL needs to step into the SQL engine in order to copy each row's data into PL/SQL.
If you do a bulk fetch of 100 rows/fetch, then you reduce the number of context switches to 10. A significant reduction.
Okay, so now you hook Forms (or Java/Delphi/C#/etc) into this chain and push that bulk collection from a PL/SQL variable to this client. What do you achieve? Not much, as you're now offsetting that reduction in context switches with more memory that needs to be used (that PL/SQL collection buffer needs PGA memory), and you add the overheads of first pulling the data from the db buffer cache into PL/SQL and then to the client.
    PL/SQL makes a very poor data cache for a client - the db buffer cache was designed exactly for that purpose. And is far superior than anything we can code in PL/SQL for the same role.
    It is much simpler, more robust, to rather fetch the data directly from the db buffer cache - no intermediate PL/SQL caching and buffering when fetching data. This will also scale better and perform better.
    The ideal is to use PL/SQL to implement a business logic layer, security, pre-processing, validation and other good stuff for the client - and then return a ref cursor to the client. Allowing the client to use that prepared cursor to fetch data directly from the db buffer cache.
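A minimal sketch of that ideal, reusing the standard scott.emp table that appears elsewhere in this thread; the function name and parameter are illustrative assumptions:
CREATE OR REPLACE FUNCTION get_dept_emps(p_deptno IN NUMBER)
  RETURN SYS_REFCURSOR
IS
  l_cur SYS_REFCURSOR;
BEGIN
  -- business logic, security checks and validation belong here, server-side
  OPEN l_cur FOR
    SELECT empno, ename, sal
    FROM   emp
    WHERE  deptno = p_deptno;
  RETURN l_cur; -- Forms (or any client) fetches rows directly from this cursor
END get_dept_emps;
/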

  • Oracle sql query - bulk collect into a 50 element array,  to print them.

    INSERT INTO temp_mobile_donors_au
    select distinct d.donor_id
    from [email protected] d,
    blood_drives b,
    ds_malaria_travel_history h
    where b.site_code = 'NA19'
    and d.coll_date between trunc(add_months(sysdate,-24))
    and trunc(sysdate)
    and d.drive_id = 'DRV'||b.drive_id
    and h.donor_id = d.donor_id;
I have 105 donors. I am taking these donors and have to print a report. I was using a loop to print the donor sheet for each donor. Usually we have about 20 to 30 donors, so the report prints without any problem. Now we have about 105 donors, and next time we may have more, so the report does not print for all donors.
I print reports using this code:
    Cursor C2 is
    select distinct donor_id
    from temp_mobile_donors_au;
    Begin
    Open C2;
    Loop
    Fetch C2 into :param_donor_id;
    Exit when C2%notfound;
    print_mobile_donor_sheets_au;
    End loop;
    Close C2;
    End;
The procedure print_mobile_donor_sheets_au is as below:
    v_url_c := 'http://'||v_rep_server_c||'report=r_ds_travel_history1&param_donor_id='||:control.param_donor_id;
Is there a way to iterate through a single cursor 50 rows at a time - bulk collect into a 50-element array, print them, then bulk collect the next batch?
Any ideas or help?

Here is a bulk-collect-limit example that I got by googling:
    SET SERVEROUTPUT ON
    DECLARE
      TYPE t_bulk_collect_test_tab IS TABLE OF bulk_collect_test%ROWTYPE;
      l_tab    t_bulk_collect_test_tab;
      CURSOR c_data IS
        SELECT *
        FROM   bulk_collect_test;
    BEGIN
      OPEN c_data;
      LOOP
        FETCH c_data
        BULK COLLECT INTO l_tab LIMIT 10000; /*I think this is what you are looking for*/
        -- Process contents of collection here.
    DBMS_OUTPUT.put_line(l_tab.count || ' rows');
    EXIT WHEN c_data%NOTFOUND;
  END LOOP;
  CLOSE c_data;
END;
/
So you need three things:
    1.  TYPE t_bulk_collect_test_tab IS TABLE OF bulk_collect_test%ROWTYPE;
    2.  l_tab    t_bulk_collect_test_tab;
    3. FETCH c_data
         BULK COLLECT INTO l_tab LIMIT 10000; /* use your reqd value */
Change your code and try.
    HTH

Need to increase performance - bulk collect in cursor with limit, and in the for loop inserting into the trigger table

    Hi all,
I have a performance issue in the code below, where I am trying to insert the data from table_stg into the target_tab and parent_tab tables, and then into child tables, via a cursor with bulk collect. target_tab and parent_tab are huge tables and have a row-wise trigger enabled on them; the trigger is mandatory. The time taken for this block to execute is 5000 seconds. Now my requirement is to reduce it to 5 to 10 minutes.
Can someone please guide me here? It's a bit urgent. Awaiting your response.
    declare
    vmax_Value NUMBER(5);
      vcnt number(10);
      id_val number(20);
      pc_id number(15);
      vtable_nm VARCHAR2(100);
      vstep_no  VARCHAR2(10);
      vsql_code VARCHAR2(10);
      vsql_errm varchar2(200);
      vtarget_starttime timestamp;
      limit_in number :=10000;
      idx           number(10);
              cursor stg_cursor is
             select
                   DESCRIPTION,
                   SORT_CODE,
                   ACCOUNT_NUMBER,
                     to_number(to_char(CORRESPONDENCE_DATE,'DD')) crr_day,
                     to_char(CORRESPONDENCE_DATE,'MONTH') crr_month,
                     to_number(substr(to_char(CORRESPONDENCE_DATE,'DD-MON-YYYY'),8,4)) crr_year,
                   PARTY_ID,
                   GUID,
                   PAPERLESS_REF_IND,
                   PRODUCT_TYPE,
                   PRODUCT_BRAND,
                   PRODUCT_HELD_ID,
                   NOTIFICATION_PREF,
                   UNREAD_CORRES_PERIOD,
                   EMAIL_ID,
                   MOBILE_NUMBER,
                   TITLE,
                   SURNAME,
                   POSTCODE,
                   EVENT_TYPE,
                   PRIORITY_IND,
                   SUBJECT,
                   EXT_PRD_ID_TX,
                   EXT_PRD_HLD_ID_TX,
                   EXT_SYS_ID,
                   EXT_PTY_ID_TX,
                   ACCOUNT_TYPE_CD,
                   COM_PFR_TYP_TX,
                   COM_PFR_OPT_TX,
                   COM_PFR_RSN_CD
             from  table_stg;
    type rec_type is table of stg_rec_type index by pls_integer;
    v_rt_all_cols rec_type;
    BEGIN
      vstep_no   := '0';
      vmax_value := 0;
      vtarget_starttime := systimestamp;
      id_val    := 0;
      pc_id     := 0;
      success_flag := 0;
              vstep_no  := '1';
              vtable_nm := 'before cursor';
        OPEN stg_cursor;
              vstep_no  := '2';
              vtable_nm := 'After cursor';
       LOOP
              vstep_no  := '3';
              vtable_nm := 'before fetch';
    --loop
        FETCH stg_cursor BULK COLLECT INTO v_rt_all_cols LIMIT limit_in;
                  vstep_no  := '4';
                  vtable_nm := 'after fetch';
    --EXIT WHEN v_rt_all_cols.COUNT = 0;
        EXIT WHEN stg_cursor%NOTFOUND;
    FOR i IN 1 .. v_rt_all_cols.COUNT
      LOOP
       dbms_output.put_line(upper(v_rt_all_cols(i).event_type));
        if (upper(v_rt_all_cols(i).event_type) = upper('System_enforced')) then
                  vstep_no  := '4.1';
                  vtable_nm := 'before seq sel';
              select PC_SEQ.nextval into pc_id from dual;
                  vstep_no  := '4.2';
                  vtable_nm := 'before insert corres';
              INSERT INTO target1_tab
                           (ID,
                            PARTY_ID,
                            PRODUCT_BRAND,
                            SORT_CODE,
                            ACCOUNT_NUMBER,
                            EXT_PRD_ID_TX,         
                            EXT_PRD_HLD_ID_TX,
                            EXT_SYS_ID,
                            EXT_PTY_ID_TX,
                            ACCOUNT_TYPE_CD,
                            COM_PFR_TYP_TX,
                            COM_PFR_OPT_TX,
                            COM_PFR_RSN_CD,
                            status)
             VALUES
                            (pc_id,
                             v_rt_all_cols(i).party_id,
                             decode(v_rt_all_cols(i).product_brand,'LTB',2,'HLX',1,'HAL',1,'BOS',3,'VER',4,0),
                             v_rt_all_cols(i).sort_code,
                             'XXXX'||substr(trim(v_rt_all_cols(i).ACCOUNT_NUMBER),length(trim(v_rt_all_cols(i).ACCOUNT_NUMBER))-3,4),
                             v_rt_all_cols(i).EXT_PRD_ID_TX,
                             v_rt_all_cols(i).EXT_PRD_HLD_ID_TX,
                             v_rt_all_cols(i).EXT_SYS_ID,
                             v_rt_all_cols(i).EXT_PTY_ID_TX,
                             v_rt_all_cols(i).ACCOUNT_TYPE_CD,
                             v_rt_all_cols(i).COM_PFR_TYP_TX,
                             v_rt_all_cols(i).COM_PFR_OPT_TX,
                             v_rt_all_cols(i).COM_PFR_RSN_CD,
                             NULL);
                  vstep_no  := '4.3';
                  vtable_nm := 'after insert corres';
        else
              select COM_SEQ.nextval into id_val from dual;
                  vstep_no  := '6';
                  vtable_nm := 'before insertcomm';
          if (upper(v_rt_all_cols(i).event_type) = upper('REMINDER')) then
                vstep_no  := '6.01';
                  vtable_nm := 'after if insertcomm';
              insert into parent_tab
                 (ID ,
                 CTEM_CODE,
                 CHA_CODE,            
                 CT_CODE,                           
                 CONTACT_POINT_ID,             
                 SOURCE,
                 RECEIVED_DATE,                             
                 SEND_DATE,
                 RETRY_COUNT)
              values
                 (id_val,
                  lower(v_rt_all_cols(i).event_type), 
                  decode(v_rt_all_cols(i).product_brand,'LTB',2,'HLX',1,'HAL',1,'BOS',3,'VER',4,0),
                  'Email',
                  v_rt_all_cols(i).email_id,
                  'IADAREMINDER',
                  systimestamp,
                  systimestamp,
                  0);  
         else
                vstep_no  := '6.02';
                  vtable_nm := 'after else insertcomm';
              insert into parent_tab
                 (ID ,
                 CTEM_CODE,
                 CHA_CODE,            
                 CT_CODE,                           
                 CONTACT_POINT_ID,             
                 SOURCE,
                 RECEIVED_DATE,                             
                 SEND_DATE,
                 RETRY_COUNT)
              values
                 (id_val,
                  lower(v_rt_all_cols(i).event_type), 
                  decode(v_rt_all_cols(i).product_brand,'LTB',2,'HLX',1,'HAL',1,'BOS',3,'VER',4,0),
                  'Email',
                  v_rt_all_cols(i).email_id,
                  'CORRESPONDENCE',
                  systimestamp,
                  systimestamp,
                  0); 
            END if; 
                  vstep_no  := '6.11';
                  vtable_nm := 'before chop';
             if (v_rt_all_cols(i).ACCOUNT_NUMBER is not null) then 
                      v_rt_all_cols(i).ACCOUNT_NUMBER := 'XXXX'||substr(trim(v_rt_all_cols(i).ACCOUNT_NUMBER),length(trim(v_rt_all_cols(i).ACCOUNT_NUMBER))-3,4);
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)
              values
                (id_val,
                 'IB.Correspondence.AccountNumberMasked',
                 v_rt_all_cols(i).ACCOUNT_NUMBER);
             end if;
                  vstep_no  := '6.1';
                  vtable_nm := 'before stateday';
             if (v_rt_all_cols(i).crr_day is not null) then 
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)
              values
                (id_val,
                 --'IB.Correspondence.Date.Day',
                 'IB.Crsp.Date.Day',
                 v_rt_all_cols(i).crr_day);
             end if;
                  vstep_no  := '6.2';
                  vtable_nm := 'before statemth';
             if (v_rt_all_cols(i).crr_month is not null) then 
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)
              values
                (id_val,
                 --'IB.Correspondence.Date.Month',
                 'IB.Crsp.Date.Month',
                 v_rt_all_cols(i).crr_month);
             end if;
                  vstep_no  := '6.3';
                  vtable_nm := 'before stateyear';
             if (v_rt_all_cols(i).crr_year is not null) then 
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)
              values
                (id_val,
                 --'IB.Correspondence.Date.Year',
                 'IB.Crsp.Date.Year',
                 v_rt_all_cols(i).crr_year);
             end if;
                  vstep_no  := '7';
                  vtable_nm := 'before type';
               if (v_rt_all_cols(i).product_type is not null) then
                  insert into child_tab
                     (COM_ID,                                            
                     KEY,                                                                                                                                        
                     VALUE)
                  values
                    (id_val,
                     'IB.Product.ProductName',
                   v_rt_all_cols(i).product_type);
                end if;
                  vstep_no  := '9';
                  vtable_nm := 'before title';         
              if (trim(v_rt_all_cols(i).title) is not null) then
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE )
              values
                (id_val,
                 'IB.Customer.Title',
                 trim(v_rt_all_cols(i).title));
              end if;
                  vstep_no  := '10';
                  vtable_nm := 'before surname';
              if (v_rt_all_cols(i).surname is not null) then
                insert into child_tab
                   (COM_ID,                                            
                   KEY,                                                                                                                                          
                   VALUE)
                values
                  (id_val,
                  'IB.Customer.LastName',
                  v_rt_all_cols(i).surname);
              end if;
                            vstep_no  := '12';
                            vtable_nm := 'before postcd';
              if (trim(v_rt_all_cols(i).POSTCODE) is not null) then
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)                              
               values
                (id_val,
                 'IB.Customer.Addr.PostCodeMasked',
                  substr(replace(v_rt_all_cols(i).POSTCODE,' ',''),length(replace(v_rt_all_cols(i).POSTCODE,' ',''))-2,3));
              end if;
                            vstep_no  := '13';
                            vtable_nm := 'before subject';
              if (trim(v_rt_all_cols(i).SUBJECT) is not null) then
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)                              
               values
                (id_val,
                 'IB.Correspondence.Subject',
                  v_rt_all_cols(i).subject);
              end if;
                            vstep_no  := '14';
                            vtable_nm := 'before inactivity';
              if (trim(v_rt_all_cols(i).UNREAD_CORRES_PERIOD) is null or
                  trim(v_rt_all_cols(i).UNREAD_CORRES_PERIOD) = '3' or
                  trim(v_rt_all_cols(i).UNREAD_CORRES_PERIOD) = '6' or
                  trim(v_rt_all_cols(i).UNREAD_CORRES_PERIOD) = '9') then
              insert into child_tab
                 (COM_ID,                                            
                 KEY,                                                                                                                                            
                 VALUE)                              
               values
                (id_val,
                 'IB.Correspondence.Inactivity',
                  v_rt_all_cols(i).UNREAD_CORRES_PERIOD);
              end if;
                          vstep_no  := '14.1';
                          vtable_nm := 'after notfound';
        end if;
                          vstep_no  := '15';
                          vtable_nm := 'after notfound';
        END LOOP;
        end loop;
                          vstep_no  := '16';
                          vtable_nm := 'before closecur';
        CLOSE stg_cursor;
                          vstep_no  := '17';
                          vtable_nm := 'before commit';
        DELETE FROM table_stg;
      COMMIT;
                          vstep_no  := '18';
                          vtable_nm := 'after commit';
    EXCEPTION
    WHEN OTHERS THEN
      ROLLBACK;
      success_flag := 1;
      vsql_code := SQLCODE;
      vsql_errm := SUBSTR(sqlerrm,1,200);
      error_logging_pkg.inserterrorlog('samp',vsql_code,vsql_errm, vtable_nm,vstep_no);
      RAISE_APPLICATION_ERROR (-20011, 'samp '||vstep_no||' SQLERRM:'||SQLERRM);
    end;
    Thanks

    It's a bit urgent
    NO - it is NOT urgent. Not to us.
    If you have an urgent problem you need to hire a consultant.
    I have a performance issue in the code below,
    Maybe you do and maybe you don't. How are we to really know? You haven't posted ANYTHING indicating that a performance issue exists. Please read the FAQ for how to post a tuning request and the info you need to provide. First and foremost you have to post SOMETHING that actually shows that a performance issue exists. Troubleshooting requires FACTS not just a subjective opinion.
    where I am trying to insert data from table_stg into the target_tab and parent_tab tables, and then into child tables, via a cursor with BULK COLLECT. target_tab and parent_tab are huge tables and each has a mandatory row-level trigger enabled on it. This block currently takes 5000 seconds to execute; my requirement is to reduce that to 5 to 10 minutes.
    Personally I think 5000 seconds (about 1 hr 20 minutes) is very fast for processing 800 trillion rows of data into parent and child tables. Why do you think that is slow?
    Your code has several major flaws that need to be corrected before you can even determine what, if anything, needs to be tuned.
    This code has the EXIT statement at the beginning of the loop instead of at the end
        FETCH stg_cursor BULK COLLECT INTO v_rt_all_cols LIMIT limit_in;
                  vstep_no  := '4';
                  vtable_nm := 'after fetch';
    --EXIT WHEN v_rt_all_cols.COUNT = 0;
        EXIT WHEN stg_cursor%NOTFOUND;
    The correct place for the %NOTFOUND test when using BULK COLLECT is at the END of the loop; that is, the last statement in the loop.
    You can use a COUNT test at the start of the loop but ironically you have commented it out and have now done it wrong. Either move the NOTFOUND test to the end of the loop or remove it and uncomment the COUNT test.
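    For illustration, a minimal sketch of the corrected loop shape, reusing the names from the posted code (the processing body is elided):
        OPEN stg_cursor;
        LOOP
          FETCH stg_cursor BULK COLLECT INTO v_rt_all_cols LIMIT limit_in;
          EXIT WHEN v_rt_all_cols.COUNT = 0;   -- the COUNT test is safe at the top of the loop
          FOR i IN 1 .. v_rt_all_cols.COUNT LOOP
            NULL;                              -- process v_rt_all_cols(i) here
          END LOOP;
          -- alternatively, drop the COUNT test and put EXIT WHEN stg_cursor%NOTFOUND;
          -- here, as the LAST statement in the loop
        END LOOP;
        CLOSE stg_cursor;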
    WHEN OTHERS THEN
      ROLLBACK;
    That basically says you don't even care what problem occurs or whether the problem is for a single record of your 10,000 in the collection. You pretty much just throw away any stack trace and substitute your own message.
    Your code also has NO exception handling for any of the individual steps or blocks of code.
    The code you posted also raises the question of why you are using NAME=VALUE pairs for the child data rows. Why aren't you using a standard relational table for this data?
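    To make that concrete: each NAME=VALUE pair the code writes (account mask, day, month, year, product name, and so on) could be a named column instead. A hypothetical relational layout, where the column names and sizes are guesses for illustration and not the OP's actual schema:
        -- hypothetical alternative: one row per correspondence with named columns,
        -- instead of one KEY/VALUE row per attribute
        CREATE TABLE child_tab_relational (
          com_id         NUMBER        NOT NULL,  -- FK to parent_tab.id
          account_masked VARCHAR2(20),
          crsp_day       NUMBER(2),
          crsp_month     VARCHAR2(9),
          crsp_year      NUMBER(4),
          product_name   VARCHAR2(100),
          title          VARCHAR2(30),
          last_name      VARCHAR2(100),
          postcode_mask  VARCHAR2(3),
          subject        VARCHAR2(200)
        );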
    As others have noted you are using slow-by-slow (row by row processing). Let's assume that PL/SQL, the bulk collect and row-by-row is actually necessary.
    Then you should be constructing the parent and child records into collections and then inserting them in BULK using FORALL.
    1. Create a collection for the new parent rows
    2. Create a collection for the new child rows
    3. For each set of LIMIT source row data
      a. empty the parent and child collections
      b. populate those collections with new parent/child data
      c. bulk insert the parent collection into the parent table
      d. bulk insert the child collection into the child table
    And unless you really want to either load EVERYTHING or abandon everything, you should use bulk exception handling (FORALL ... SAVE EXCEPTIONS) so that the clean data gets processed and only the dirty data gets rejected; a sketch follows.
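    A minimal sketch of that restructuring, meant to drop into the posted block (stg_cursor, v_rt_all_cols and limit_in as declared there); the collection types, the com_seq usage, and the populate/logging steps are assumptions to be filled in:
        DECLARE
          TYPE parent_tt IS TABLE OF parent_tab%ROWTYPE INDEX BY PLS_INTEGER;
          TYPE child_tt  IS TABLE OF child_tab%ROWTYPE  INDEX BY PLS_INTEGER;
          parent_rows parent_tt;
          child_rows  child_tt;
          bulk_errors EXCEPTION;
          PRAGMA EXCEPTION_INIT(bulk_errors, -24381);
        BEGIN
          OPEN stg_cursor;
          LOOP
            FETCH stg_cursor BULK COLLECT INTO v_rt_all_cols LIMIT limit_in;
            EXIT WHEN v_rt_all_cols.COUNT = 0;
            parent_rows.DELETE;                          -- a. empty the collections
            child_rows.DELETE;
            FOR i IN 1 .. v_rt_all_cols.COUNT LOOP       -- b. populate them
              parent_rows(parent_rows.COUNT + 1).id := com_seq.NEXTVAL;  -- 11g syntax; SELECT INTO on older versions
              -- derive the remaining parent fields and the child rows from v_rt_all_cols(i)
              child_rows(child_rows.COUNT + 1).com_id := parent_rows(parent_rows.COUNT).id;
            END LOOP;
            BEGIN
              FORALL i IN 1 .. parent_rows.COUNT SAVE EXCEPTIONS   -- c. bulk insert the parents
                INSERT INTO parent_tab VALUES parent_rows(i);
              FORALL i IN 1 .. child_rows.COUNT SAVE EXCEPTIONS    -- d. bulk insert the children
                INSERT INTO child_tab VALUES child_rows(i);
            EXCEPTION
              WHEN bulk_errors THEN
                FOR j IN 1 .. SQL%BULK_EXCEPTIONS.COUNT LOOP
                  NULL;  -- log SQL%BULK_EXCEPTIONS(j).ERROR_INDEX and .ERROR_CODE here
                END LOOP;
            END;
          END LOOP;
          CLOSE stg_cursor;
        END;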

  • Doubt about Bulk Collect with LIMIT

    Hi,
    I have a doubt about BULK COLLECT: when is the COMMIT done?
    I found an example at PSOUG:
    http://psoug.org/reference/array_processing.html
    CREATE TABLE servers2 AS
    SELECT *
    FROM servers
    WHERE 1=2;
    DECLARE
      CURSOR s_cur IS
        SELECT *
        FROM servers;
      TYPE fetch_array IS TABLE OF s_cur%ROWTYPE;
      s_array fetch_array;
    BEGIN
      OPEN s_cur;
      LOOP
        FETCH s_cur BULK COLLECT INTO s_array LIMIT 1000;
        FORALL i IN 1..s_array.COUNT
          INSERT INTO servers2 VALUES s_array(i);
        EXIT WHEN s_cur%NOTFOUND;
      END LOOP;
      CLOSE s_cur;
      COMMIT;
    END;
    If my table servers has 3,000,000 records, when is the commit done? Only when all records have been inserted?
    Could this overwhelm the redo log?
    Using 9.2.0.8

    muttleychess wrote:
    If my table servers has 3,000,000 records, when is the commit done?
    Commit point has nothing to do with how many rows you process. It is purely business driven. Your code implements some business transaction, right? So if you commit before the whole transaction (from a business standpoint) is complete, other sessions will already see changes that are (from a business standpoint) incomplete. Also, what if the rest of the transaction (from a business standpoint) fails?
    SY.
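    To make the mechanics concrete: in the example above the COMMIT sits after the loop, so all 3,000,000 rows go in as a single transaction and become visible to other sessions only at the end. If per-batch commits really were acceptable for the business, the COMMIT would move inside the loop, with the trade-offs sketched here (s_cur and s_array as declared above):
        LOOP
          FETCH s_cur BULK COLLECT INTO s_array LIMIT 1000;
          FORALL i IN 1..s_array.COUNT
            INSERT INTO servers2 VALUES s_array(i);
          -- COMMIT;  -- per-batch commit: other sessions see partial data,
                      -- and a failure mid-run leaves the job half done
          EXIT WHEN s_cur%NOTFOUND;
        END LOOP;
        COMMIT;       -- single commit: one all-or-nothing business transaction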

  • How to handle the bad record while using bulk collect with limit.

    Hi
    How can I handle a bad record during an insert/update without losing the whole transaction?
    Example:
    I am inserting into a table with a LIMIT of 1000 records and I get an error at the 588th record.
    I want to commit the records that did insert, log the error to an error logging table, and then continue with the remaining records.
    Can anyone suggest an approach for this case?
    Regards,
    yuva

    >
    How to handle the Bad record as part of the insertion/updation to avoid the transaction.
    >
    Use the SAVE EXCEPTIONS clause of the FORALL if you are doing bulk inserts.
    See SAVE EXCEPTIONS in the PL/SQL Language doc
    http://docs.oracle.com/cd/B28359_01/appdev.111/b28370/tuning.htm
    And then see Example 12-9 Bulk Operation that continues despite exceptions
    >
    Example 12-9 Bulk Operation that Continues Despite Exceptions
    -- Temporary table for this example:
    CREATE TABLE emp_temp AS SELECT * FROM employees;
    DECLARE
      TYPE empid_tab IS TABLE OF employees.employee_id%TYPE;
      emp_sr empid_tab;
      -- Exception handler for ORA-24381:
      errors NUMBER;
      dml_errors EXCEPTION;
      PRAGMA EXCEPTION_INIT(dml_errors, -24381);
    BEGIN
      SELECT employee_id
        BULK COLLECT INTO emp_sr FROM emp_temp
        WHERE hire_date < '30-DEC-94';
      -- Add '_SR' to job_id of most senior employees:
      FORALL i IN emp_sr.FIRST..emp_sr.LAST SAVE EXCEPTIONS
        UPDATE emp_temp SET job_id = job_id || '_SR'
        WHERE emp_sr(i) = emp_temp.employee_id;
    -- If errors occurred during FORALL SAVE EXCEPTIONS,
    -- a single exception is raised when the statement completes.
    EXCEPTION
      -- Figure out what failed and why:
      WHEN dml_errors THEN
        errors := SQL%BULK_EXCEPTIONS.COUNT;
        DBMS_OUTPUT.PUT_LINE
          ('Number of statements that failed: ' || errors);
        FOR i IN 1..errors LOOP
          DBMS_OUTPUT.PUT_LINE('Error #' || i || ' occurred during ' ||
            'iteration #' || SQL%BULK_EXCEPTIONS(i).ERROR_INDEX);
          DBMS_OUTPUT.PUT_LINE('Error message is ' ||
            SQLERRM(-SQL%BULK_EXCEPTIONS(i).ERROR_CODE));
        END LOOP;
    END;
    DROP TABLE emp_temp;
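    Applied to the insert scenario in the question, a minimal sketch might look like this (my_stage, my_tab, and err_log are hypothetical names, and my_stage is assumed to match my_tab's structure):
        DECLARE
          TYPE row_tab IS TABLE OF my_tab%ROWTYPE;
          my_rows row_tab;
          CURSOR c IS SELECT * FROM my_stage;
          dml_errors EXCEPTION;
          PRAGMA EXCEPTION_INIT(dml_errors, -24381);
        BEGIN
          OPEN c;
          LOOP
            FETCH c BULK COLLECT INTO my_rows LIMIT 1000;
            EXIT WHEN my_rows.COUNT = 0;
            BEGIN
              FORALL i IN 1..my_rows.COUNT SAVE EXCEPTIONS
                INSERT INTO my_tab VALUES my_rows(i);
            EXCEPTION
              WHEN dml_errors THEN
                -- rows that succeeded stay inserted; log only the failures
                FOR i IN 1..SQL%BULK_EXCEPTIONS.COUNT LOOP
                  INSERT INTO err_log (bad_index, err_code)
                  VALUES (SQL%BULK_EXCEPTIONS(i).ERROR_INDEX,
                          SQL%BULK_EXCEPTIONS(i).ERROR_CODE);
                END LOOP;
            END;
          END LOOP;
          CLOSE c;
          COMMIT;  -- commits the good rows and the error log entries together
        END;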

  • Bulk collect with Nested loops

    Hi, I have a requirement like this:
    I need to pull request nos from table a (master table).
    For every request no I need to pull request details from table b (detail table).
    For every request no I need to pull contact details from table c.
    For every request no I need to pull customer data from table d, and I need to create a flat file with that data using utl_file. With plain queries the nested loops are taking a lot of time, so I want to use bulk collect with the dynamic query option:
    Sample code
    =======
    create or replace procedure test(region varchar2) as
    type tablea_request_typ is table of varchar2(10);
    tablea_data tablea_request_typ;
    type tableb_request_typ is table of varchar2(1000);
    tableb_data tableb_request_typ;
    type tablec_request_typ is table of varchar2(1000);
    tablec_data tablec_request_typ;
    type tabled_request_typ is table of varchar2(1000);
    tabled_data tabled_request_typ;
    stmta varchar2(32000);
    stmtb varchar2(32000);
    stmtc varchar2(32000);
    stmtd varchar2(32000);
    rcura SYS_REFCURSOR;
    rcurb SYS_REFCURSOR;
    rcurc SYS_REFCURSOR;
    rcurd SYS_REFCURSOR;
    begin
    stmta:='select  request_no from tablea where :region'||'='NE';
    stmtb:='select  request_no||request_detail1||request_detail2 stringb  from table b where :region'||'='NE';
    stmtc:='select contact1||contact2||contact3||contact4  stringc from table c where :region'||'='NE';
    stmtd:='select customer1||customer2||customer3||customer4  stringd  from table c where :region'||'='NE';
    OPEN rcura for stmta;
      LOOP
      FETCH rcura BULK COLLECT INTO request_no
      LIMIT 1000;
      FOR  f in 1..request_no.count
    LOOP
    --Tableb
        OPEN rcurb for stmtb USING substr(request_no(f),1,14);
      LOOP
      FETCH rcurb BULK COLLECT INTO tableb_data
    for i in 1..tableb_data.count
    LOOP
    utl_file(...,tableb_data(i));
    END LOOP;
        EXIT WHEN rcurb%NOTFOUND;
      END LOOP;
    -- Tablec
    OPEN rcurc for stmtc USING substr(request_no(f),1,14);
      LOOP
      FETCH rcurb BULK COLLECT INTO tablec_data
    for i in 1..tablec_data.count
    LOOP
    utl_file(...,tablec_data(i));
    END LOOP;
        EXIT WHEN rcurc%NOTFOUND;
      END LOOP;
    -- Tabled
    OPEN rcurd for stmtd USING substr(request_no(f),1,14);
      LOOP
      FETCH rcurd BULK COLLECT INTO tabled_data
    for i in 1..tabled_data.count
    LOOP
    utl_file(...,tabled_data(i));
    END LOOP;
        EXIT WHEN rcurd%NOTFOUND;
      END LOOP;
      END LOOP;
        EXIT WHEN rcura%NOTFOUND;
      END LOOP;
    exception
    when others then
    dbms_output.put_line(sqlerrm);
    end;
    I'm using the code above, but request nos are repeating as if it's an infinite loop. For example, if request no 222 should run once, here it runs more than once.
    How do I pass bind parameters, in my case region?
    Are there any alternate solutions to make it run faster, apart from using bulk collect?
    Right now I'm using an explicit cursor with a FOR loop, which is taking a lot of time. Is this a better solution?
    Thanks,
    Mahender
    Edited by: BluShadow on 24-Aug-2011 08:52
    added {noformat}{noformat} tags. Please read {message:id=9360002} to learn to format your code/data yourself.

    Use a parameterized cursor:
    CREATE OR REPLACE PROCEDURE test(region varchar2)
    AS
      type tablea_request_typ is table of varchar2(10);
      type tableb_request_typ is table of varchar2(1000);
      type tablec_request_typ is table of varchar2(1000);
      type tabled_request_typ is table of varchar2(1000);
      tablea_data tablea_request_typ;
      tableb_data tableb_request_typ;
      tablec_data tablec_request_typ;
      tabled_data tabled_request_typ;
      CURSOR rcura(v_region VARCHAR2)
      IS
        select request_no from tablea where region = v_region;
      CURSOR rcurb(v_input VARCHAR2)
      IS
        select request_no||request_detail1||request_detail2 stringb from tableb where request_num = v_input;
      CURSOR rcurc(v_input VARCHAR2)
      IS
        select contact1||contact2||contact3||contact4 stringc from tablec where request_num = v_input;
      CURSOR rcurd(v_input VARCHAR2)
      IS
        select customer1||customer2||customer3||customer4 stringd from tabled where request_num = v_input;
    BEGIN
      OPEN rcura(region);
      LOOP
        FETCH rcura BULK COLLECT INTO tablea_data LIMIT 1000;
        FOR f in 1..tablea_data.count
        LOOP
          -- Tableb
          OPEN rcurb(substr(tablea_data(f),1,14));
          FETCH rcurb BULK COLLECT INTO tableb_data;
          CLOSE rcurb;
          for i in 1..tableb_data.count
          LOOP
            utl_file(...,tableb_data(i));
          END LOOP;
          -- Tablec
          OPEN rcurc(substr(tablea_data(f),1,14));
          FETCH rcurc BULK COLLECT INTO tablec_data;
          CLOSE rcurc;
          for i in 1..tablec_data.count
          LOOP
            utl_file(...,tablec_data(i));
          END LOOP;
          -- Tabled
          OPEN rcurd(substr(tablea_data(f),1,14));
          FETCH rcurd BULK COLLECT INTO tabled_data;
          CLOSE rcurd;
          for i in 1..tabled_data.count
          LOOP
            utl_file(...,tabled_data(i));
          END LOOP;
        END LOOP;
        EXIT WHEN rcura%NOTFOUND;
      END LOOP;
      CLOSE rcura;
    EXCEPTION
    WHEN OTHERS THEN
      dbms_output.put_line(dbms_utility.format_error_backtrace);
      RAISE;
    END;
    /
    Hope this helps. If not, post your table structures...
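    If you do need to stay with dynamic SQL and ref cursors, the bind value is supplied with the USING clause rather than concatenated into the statement. A minimal sketch, reusing tablea and region from the question:
        declare
          rcura sys_refcursor;
          type req_tab is table of varchar2(10);
          request_no req_tab;
          stmta varchar2(200) := 'select request_no from tablea where region = :rgn';
        begin
          open rcura for stmta using 'NE';   -- the value is bound here, not concatenated
          loop
            fetch rcura bulk collect into request_no limit 1000;
            exit when request_no.count = 0;
            for f in 1 .. request_no.count loop
              null;  -- process request_no(f)
            end loop;
          end loop;
          close rcura;
        end;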

  • Which is better??? for loop or bulk collect

    declare
      cursor test is
        select * from employees;
      type emp_tab is table of employees%rowtype index by pls_integer;
      myvar_a emp_tab;
      myvar_b emp_tab;
      i pls_integer := 1;
    begin
      open test;
      loop
        fetch test into myvar_a(i);  -- CASE A: one row per fetch
        exit when test%notfound;
        i := i + 1;
      end loop;
      close test;
      open test;
      fetch test bulk collect into myvar_b;  -- CASE B: all rows in a single fetch
      close test;
    end;
    Which case is better, A or B?
    Edited by: Kakashi on May 31, 2009 12:54 AM

    Depends on the meaning of better.
    Generally case B should be faster, although a bit more elaborate code is required.
    But there may be exceptions. I think I read somewhere (I'm home now and I cannot find it at the moment) that in 10g (or 11g - not sure) 100 rows at a time are pre-fetched behind the scenes even when you use case A. So using case B with a low limit could well be slower.
    If I can express an additional opinion, case F(irst) is nearly always the best, i.e. plain SQL (no loops at all). I'm aware that sometimes it cannot be used, but it should be the first approach to be tried.
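    For instance, if the goal of case A/B were simply to copy the rows into another table, case F is a single statement with no collections or loops at all (employees_copy is a hypothetical target table):
        insert into employees_copy
        select * from employees;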
    Regards
    Etbin
    FOUND: http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:213366500346264333
    CONTAINS
    Hey Tom, love the site. I noticed in your first fetch, which was in the first for loop that did an unconditional exit:
    for x in ( select rownum r, t1.* from big_table.big_table t1 )
    loop
        exit;
    end loop;
    In looking at the TKPROF output for that query, it shows the number of rows being fetched as 100. Does that prove / demonstrate the bulk collecting optimization that Oracle added in 10g, where it implicitly and automatically does a bulk collect of limit 100 behind the scenes?
    This came up at a discussion at my site very recently, and I think I can just point them to your example here as a demo rather than creating my own. I assume that if you ran the same thing in 9iR2, then that first fetch of rows in TKPROF would only show 1?
    Followup April 18, 2007 - 1pm US/Eastern:
    yes, that demonstrates the implicit array fetch of 100 rows...
    in 9i, it would show 1 row fetched.
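    So on 10g and later even a plain implicit cursor loop is array-fetched behind the scenes; a trivial demonstration (big_table as in the quoted example):
        begin
          for x in (select * from big_table) loop
            null;  -- each iteration sees one row, but Oracle fetches 100 at a time under the covers
          end loop;
        end;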
    Edited by: Etbin on 31.5.2009 10:38

  • ADS search returns only 1000 nodes...

    Executing a search against the root node of ADS returns only 1000 objects despite the fact that we have called searchControls.setCountLimit(0). We are using a non-anonymous connection which, in fact, is using a non-administrator's DN and password to log in. When we use a non-anonymous connection with an administrator's DN and password, we get 2000 entries returned, so we have confidence that some setting inside ADS is the problem.
    Anyone know exactly what we need to tweak within ADS to allow us to get back a truly unlimited number of "hits" when searching?
    Thanks,
    Richard

    Here is the code snippet that helped me fetch more than 1000 users; hope this helps.
          InitialLdapContext aContext = null;
          PagedResultsControl pageResultControl = null;
          try {
               // ask the server to return results in pages of 999 entries
               pageResultControl = new PagedResultsControl(999);
          } catch (Exception e) {
               e.printStackTrace();
          }
          Control[] controls = { pageResultControl };
          // the initial capacity should come from a configuration property
          List list = new ArrayList(1000);
          try {
               // env holds the properties for connecting to the AD/LDAP server
               aContext = new InitialLdapContext(env, null);
               aContext.setRequestControls(controls);
          } catch (Exception e1) {
               e1.printStackTrace();
          }
          String searchBase = "dc=domain,dc=com";
          String[] attributes = { "cn", "sAMAccountName", "mail" };
          String searchFilter = "(objectClass=user)";
          SearchControls control = new SearchControls();
          // a count limit of zero means fetch without limit
          control.setCountLimit(0);
          // a time limit of zero means no timeout
          control.setTimeLimit(0);
          control.setDerefLinkFlag(true);
          control.setReturningAttributes(attributes);
          control.setSearchScope(SearchControls.SUBTREE_SCOPE);
          SearchResult searchResult = null;
          NamingEnumeration ne = null;
          NamingEnumeration atNe = null;
          try {
               int i = 0;
               // loop as long as the server keeps sending paging cookies
               byte[] cookie = null;
               do {
                    ne = aContext.search(searchBase, searchFilter, control);
                    while (ne.hasMoreElements()) {
                         searchResult = (SearchResult) ne.nextElement();
                         if (searchResult == null)
                              continue;
                         i++;
                         // construct your LDAP user object here and
                         // add it to the array list
                         Attributes resultAttributes = searchResult.getAttributes();
                         if (resultAttributes == null)
                              continue;
                         atNe = resultAttributes.getAll();
                         if (atNe == null)
                              continue;
                         while (atNe.hasMoreElements()) {
                              // read whatever attributes you need in this loop
                              atNe.nextElement();
                         } // closing brace of the attribute enumeration loop
                    } // end of the naming enumeration loop
                    cookie = ((PagedResultsResponseControl) aContext.getResponseControls()[0]).getCookie();
                    if (cookie != null) {
                         System.out.println("Paging again for -> " + searchFilter);
                         aContext.setRequestControls(new Control[] { new PagedResultsControl(999, cookie, Control.CRITICAL) });
                    }
               } while (cookie != null); // continue until the server stops sending cookies
          } catch (Exception e) {
               System.out.println("Error while processing the search results");
          } finally {
               // release the context here, i.e. close it
               if (aContext != null) {
                    try {
                         aContext.close();
                    } catch (Exception ignore) {
                    }
               }
          }