Performance issue in procedure

Hi All,
I have a performance issue with the procedure below; it is taking 10-15 hours. The custom table has 2 lakh (200,000) records.
PROCEDURE update_summary_dollar_amounts( p_errbuf OUT VARCHAR2
,p_retcode OUT NUMBER) IS
v_customer_id NUMBER := NULL;
pymt_count NUMBER := 0;
rec_count NUMBER := 0;
v_number_late NUMBER;
v_number_on_time NUMBER;
v_days_late NUMBER;
v_avg_elapsed NUMBER;
v_avg_elapsed_US NUMBER;
v_percent_prompt NUMBER;
v_percent_late NUMBER;
v_number_open NUMBER;
v_last_payment_amount NUMBER;
v_last_payment_date DATE;
v_prev_payment_amount NUMBER;
v_prev_payment_date DATE;
v_last_sale_amount NUMBER;
v_last_sale_date DATE;
v_mtd_sales NUMBER;
v_ytd_sales NUMBER;
v_prev_year_sales NUMBER;
v_prev_receipt_num VARCHAR2(30);
v_last_sale VARCHAR2(50);
c_current_year VARCHAR2(4);
c_previous_year VARCHAR2(4);
c_current_month VARCHAR2(8);
/* ====================================================================== */
/* CURSOR Customer Cursor (Main Customer) LOOP */
/* ====================================================================== */
CURSOR customer_cursor IS
SELECT cst.customer_id customer_id
,cst.customer_number customer_number
,cst.org_id org_id
FROM zz_ar_customer_summary_all cst
ORDER by cst.customer_id;
/* ====================================================================== */
/* CURSOR Payments Cursor LOOP */
/* Note: This logic is taken from the Customer Credit Snapshot */
/* Report - ARXCCS */
/* ====================================================================== */
CURSOR payments_cursor IS
SELECT cr.receipt_number receipt_num
,NVL(cr.amount,0) amount
,crh.gl_date gl_date
FROM ar_lookups
,ar_cash_receipts_all cr
,ar_cash_receipt_history_all crh
,ar_receivable_applications_all ra
,ra_customer_trx_all ct
WHERE NVL(cr.type,'CASH') = ar_lookups.lookup_code
AND ar_lookups.lookup_type = 'PAYMENT_CATEGORY_TYPE'
AND cr.pay_from_customer = v_customer_id
AND cr.cash_receipt_id = ra.cash_receipt_id
AND cr.cash_receipt_id = crh.cash_receipt_id
AND crh.first_posted_record_flag = 'Y'
AND ra.applied_customer_trx_id = ct.customer_trx_id(+)
ORDER BY cr.creation_date DESC
,cr.cash_receipt_id DESC
,ra.creation_date DESC;
customer_record customer_cursor%rowtype;
payments_record payments_cursor%rowtype;
BEGIN
p_errbuf := NULL;
p_retcode := 0;
c_current_year := TO_CHAR(SYSDATE,'YYYY');
c_current_month := TO_CHAR(SYSDATE,'YYYYMM');
c_previous_year := TO_CHAR(TO_NUMBER(c_current_year) - 1);
FOR customer_record IN customer_cursor LOOP
/* Get Days Late and Average Elapsed Days */
/* Note: This logic is taken from the Customer Credit Snapshot */
/* Report - ARXCCS */
BEGIN
v_customer_id := customer_record.customer_id;
BEGIN
SELECT DECODE(COUNT(cr.deposit_date), 0, 0, ROUND(SUM(cr.deposit_date - ps.trx_date) / COUNT(cr.deposit_date))) avgdays
,DECODE(COUNT(cr.deposit_date), 0, 0, ROUND(SUM(cr.deposit_date - ps.due_date) / COUNT(cr.deposit_date))) avgdayslate
,NVL(SUM(DECODE(SIGN(cr.deposit_date - ps.due_date),1, 1, 0)), 0) newlate
,NVL(SUM( DECODE(SIGN(cr.deposit_date - ps.due_date),1, 0, 1)), 0) newontime
INTO v_avg_elapsed
,v_days_late
,v_number_late
,v_number_on_time
FROM ar_receivable_applications_all ra
,ar_cash_receipts_all cr
,ar_payment_schedules_all ps
WHERE ra.cash_receipt_id = cr.cash_receipt_id
AND ra.applied_payment_schedule_id = ps.payment_schedule_id
AND ps.customer_id = v_customer_id
AND ra.apply_date BETWEEN ADD_MONTHS(SYSDATE, -12) AND SYSDATE
AND ra.status = 'APP'
AND ra.display = 'Y'
AND NVL(ps.receipt_confirmed_flag,'Y') = 'Y';
EXCEPTION
WHEN NO_DATA_FOUND THEN
v_days_late := NULL;
v_number_late := NULL;
v_avg_elapsed := NULL;
v_number_on_time := NULL;
END;
IF (v_number_on_time + v_number_late) > 0
THEN
v_percent_prompt := ROUND(v_number_on_time/(v_number_on_time + v_number_late),2) * 100;
v_percent_late := ROUND(v_number_late/(v_number_on_time + v_number_late),2) * 100;
ELSE
v_percent_prompt := 0;
v_percent_late := 0;
END IF;
/* C2# 49827 */
/* Get new average elapsed days for US use only */
v_avg_elapsed_us := NULL;
IF NVL(customer_record.org_id,-999) = 114
THEN
v_avg_elapsed_us := 0;
BEGIN
SELECT ROUND(SUM(NVL(ra.amount_applied,0) * (cr.deposit_date - ps.trx_date)) / DECODE(SUM(NVL(ra.amount_applied,0)),0,1,SUM(NVL(ra.amount_applied,0)))) avg_elapsed_us
INTO v_avg_elapsed_us
FROM ar_receivable_applications_all ra
,ar_cash_receipts_all cr
,ar_payment_schedules_all ps
WHERE ra.cash_receipt_id = cr.cash_receipt_id
AND ra.applied_payment_schedule_id = ps.payment_schedule_id
AND ps.customer_id = v_customer_id
AND ra.apply_date BETWEEN ADD_MONTHS(SYSDATE, -06) AND SYSDATE
AND ps.status = 'CL'
AND ra.status = 'APP'
AND ra.display = 'Y'
AND nvl(ps.receipt_confirmed_flag,'Y') = 'Y'
AND ra.amount_applied <> 0;
v_avg_elapsed_us := NVL(v_avg_elapsed_us,0);
EXCEPTION
WHEN NO_DATA_FOUND THEN
v_avg_elapsed_us := NULL;
END;
END IF;
END;
/* Get MTD, YTD, Prev Year Sales */
/* Note: This logic is taken from the Customer Credit Snapshot */
/* Report - ARXCCS */
BEGIN
SELECT NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYYMM'),c_current_month,amount_due_original,0)),0) mtd_sales
,NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYY'),c_current_year,amount_due_original,0)),0) ytd_sales
,NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYY'),c_previous_year,amount_due_original,0)),0) prev_sales
,SUM(DECODE(ps.status,'OP',(DECODE(SIGN(amount_due_original),1,1,0)),0)) number_open
INTO v_mtd_sales
,v_ytd_sales
,v_prev_year_sales
,v_number_open
FROM ar_payment_schedules_all ps
WHERE ps.customer_id = v_customer_id
AND ps.class != 'PMT';
EXCEPTION
WHEN NO_DATA_FOUND THEN
v_mtd_sales := NULL;
v_ytd_sales := NULL;
v_prev_year_sales := NULL;
END;
/* Get Last and Previous Payments */
pymt_count := 0;
v_last_payment_date := NULL;
v_prev_payment_date := NULL;
v_last_payment_amount := NULL;
v_prev_payment_amount := NULL;
v_prev_receipt_num := NULL;
FOR payments_record IN payments_cursor LOOP
BEGIN
IF payments_record.receipt_num = v_prev_receipt_num
THEN
NULL;
ELSIF pymt_count = 0
THEN
v_last_payment_date := payments_record.gl_date;
v_last_payment_amount := payments_record.amount;
pymt_count := pymt_count +1;
v_prev_receipt_num := payments_record.receipt_num;
ELSIF pymt_count = 1
THEN
v_prev_payment_date := payments_record.gl_date;
v_prev_payment_amount := payments_record.amount;
EXIT;
ELSE
EXIT;
END IF;
END;
END LOOP;
/* Get Last Sale Date and Amount */
/* Note: This logic is taken from the Customer Credit Snapshot */
/* Report - ARXCCS */
BEGIN
SELECT MAX(TO_CHAR(ct.trx_date,'YYYYDDD')||ps.amount_due_original)
INTO v_last_sale
FROM ra_cust_trx_types_all ctt
,ar_payment_schedules_all ps
,ra_customer_trx_all ct
WHERE ps.customer_trx_id = ct.customer_trx_id
AND ct.cust_trx_type_id = ctt.cust_trx_type_id
AND ct.bill_to_customer_id = v_customer_id
AND ps.class || '' = 'INV'
ORDER BY ct.trx_date DESC
,ct.customer_trx_id DESC;
EXCEPTION
WHEN NO_DATA_FOUND
THEN
v_last_sale := NULL;
END;
IF v_last_sale IS NOT NULL
THEN
v_last_sale_date := TO_DATE(SUBSTR(v_last_sale,1,7),'YYYYDDD');
v_last_sale_amount := SUBSTR(v_last_sale,8,15);
ELSE
v_last_sale_date := NULL;
v_last_sale_amount := NULL;
END IF;
/* Update Values into ZZ_AR_CUSTOMER_SUMMARY_ALL */
BEGIN
UPDATE zz_ar_customer_summary_all
SET sales_last_year = v_prev_year_sales
,sales_ytd = v_ytd_sales
,sales_mtd = v_mtd_sales
,last_sale_date = v_last_sale_date
,last_sale_amount = v_last_sale_amount
,last_payment_date = v_last_payment_date
,last_payment_amount = v_last_payment_amount
,previous_payment_date = v_prev_payment_date
,previous_payment_amount = v_prev_payment_amount
,prompt = v_percent_prompt
,late = v_percent_late
,avg_elapsed_days = v_avg_elapsed
,avg_elapsed_days_us = v_avg_elapsed_us -- C2# 49827
,days_late = v_days_late
,number_open = v_number_open
WHERE customer_id = customer_record.customer_id;
EXCEPTION
WHEN PROGRAM_ERROR THEN NULL;
WHEN DUP_VAL_ON_INDEX THEN NULL;
WHEN STORAGE_ERROR THEN NULL;
WHEN OTHERS THEN NULL;
END;
rec_count := rec_count + 1;
IF rec_count = 10000
THEN
COMMIT;
rec_count := 0;
fnd_file.put_line(fnd_file.output,'Commit at customer_id = ' || TO_CHAR(customer_record.customer_id) || ' ' || TO_CHAR(SYSDATE, 'DD-MON-YYYY HH24:MI:SS'));
fnd_file.new_line(fnd_file.output,1);
END IF;
END LOOP;
COMMIT;
EXCEPTION
WHEN others THEN
ROLLBACK;
p_retcode := 2;
p_errbuf := SQLERRM;
END update_summary_dollar_amounts;
Thanks,
Anu

Based on my initial assessment of the code, it looks like you are utilizing the "slow by slow" method. It is often termed "slow by slow" because it is one of the most INefficient ways of doing data processing: it uses CURSOR FOR LOOPs to loop through entire record sets and process them one row at a time. In your case you are using NESTED FOR LOOPs, which can exacerbate the problem.
I recommend you re-think your approach and try to do everything in a single SQL statement, or a few SQL statements, if possible, and avoid the procedural logic.
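For illustration only, here is a minimal sketch of the set-based pattern, covering just the MTD/YTD/previous-year piece of your loop as a single MERGE. Table and column names are taken from your post; the payment and elapsed-days aggregates would have to be folded in as additional joined inline views, and you should verify the logic against your report requirements:

-- Hedged sketch: one set-based MERGE instead of a row-by-row loop
MERGE INTO zz_ar_customer_summary_all s
USING (
    SELECT ps.customer_id
          ,NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYYMM')
                         ,TO_CHAR(SYSDATE,'YYYYMM'), ps.amount_due_original, 0)),0) mtd_sales
          ,NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYY')
                         ,TO_CHAR(SYSDATE,'YYYY'), ps.amount_due_original, 0)),0) ytd_sales
          ,NVL(SUM(DECODE(TO_CHAR(ps.trx_date,'YYYY')
                         ,TO_CHAR(ADD_MONTHS(SYSDATE,-12),'YYYY'), ps.amount_due_original, 0)),0) prev_year_sales
          ,SUM(DECODE(ps.status,'OP',DECODE(SIGN(ps.amount_due_original),1,1,0),0)) number_open
      FROM ar_payment_schedules_all ps
     WHERE ps.class != 'PMT'
     GROUP BY ps.customer_id
) agg
ON (s.customer_id = agg.customer_id)
WHEN MATCHED THEN UPDATE
   SET s.sales_mtd       = agg.mtd_sales
      ,s.sales_ytd       = agg.ytd_sales
      ,s.sales_last_year = agg.prev_year_sales
      ,s.number_open     = agg.number_open;

One statement like this visits ar_payment_schedules_all once for all 200,000 customers, instead of once per customer.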
If you can post your business requirements, and sample data we may be able to help you achieve your goal.
HTH!

Similar Messages

  • Can someone help me diagnose a strange stored procedure performance issue please?

    I have a stored procedure (posted below) that returns message recommendations based upon the Yammer Networks you have selected. If I choose one network this query takes less than one second. If I choose another this query takes 9 - 12 seconds.
    /****** Object: StoredProcedure [dbo].[MessageView_GetOutOfContextRecommendations_LargeSet] Script Date: 2/18/2015 3:10:35 PM ******/
    SET ANSI_NULLS ON
    GO
    SET QUOTED_IDENTIFIER ON
    GO
    CREATE PROCEDURE [dbo].[MessageView_GetOutOfContextRecommendations_LargeSet]
    -- Parameters
    @UserID int,
    @SourceMessageID int = 0
    AS
    BEGIN
    -- variable for @HomeNeworkUserID
    Declare @HomeNeworkUserID int
    -- Set the HomeNetworkID
    Set @HomeNeworkUserID = (Select HomeNetworkUserID From NetworkUser Where UserID = @UserID)
    -- SET NOCOUNT ON added to prevent extra result sets from
    -- interfering with SELECT statements.
    SET NOCOUNT ON
    -- Begin Select Statement
    Select Top 40 [CreatedDate],[FileDownloadUrl],[HasLinkOrAttachment],[ImagePreviewUrl],[LikesCount],[LinkFileName],[LinkType],[MessageID],[MessageSource],[MessageText],[MessageWebUrl],[NetworkID],[NetworkName],[PosterEmailAddress],[PosterFirstName],[PosterImageUrl],[PosterName],[PosterUserName],[PosterWebUrl],[RepliesCount],[Score],[SmallIconUrl],[Subjects],[SubjectsCount],[UserID]
    -- From View
    From [MessageView]
    -- Do Not Return Any Messages That Have Been Recommended To This User Already
    Where [MessageID] Not In (Select MessageID From MessageRecommendationHistory Where UserID = @UserID)
    -- Do Not Return Any Messages Created By This User
    And [UserID] != @UserID
    -- Do Not Return The MessageID
    And [MessageID] != @SourceMessageID
    -- Only return messages for the Networks the user has selected
    And [NetworkID] In (Select NetworkID From NetworkUser Where [HomeNetworkUserID] = @HomeNeworkUserID And [AllowRecommendations] = 1)
    -- Order By [MessageScore] and [MessageCreatedDate] in reverse order
    Order By [Score] desc, [CreatedDate] desc
END
The Actual Execution Plan shows up the same; there are more messages on the network that is slow, 2,800 versus 1,500, but it is ten times slower on the slow network. Is the fact I am doing a Top 40 what makes it slow? My first guess was to take the Order By off, and that didn't seem to make any difference. The execution plan is below; 62% of the query goes to looking up IX_Message.Score, which is the clustered index, so I thought this would be fast. Also, the Clustered Index Seek for User.UserID takes 26%, which seems high for what it is doing.
    I have indexes on every field that is queried on so I am kind of at a loss as to where to go next.
    It just seems strange because it is the same view being queried in both cases.
    I tried to run the SQL Server Tuning Wizard but it doesn't run on Azure SQL, and my problem doesn't occur on the data in my local database.
Thanks for any guidance. I know a lot of the slowness is due to the lower-tier Azure SQL we are using; many of the performance issues weren't noticed when we were on the full SQL Server. But the other networks work extremely fast, so it has to be something to do with having more rows.
In case you need it, the SQL for the view that I am querying is:
    SET QUOTED_IDENTIFIER ON
    GO
    CREATE VIEW [dbo].[MessageView]
    AS
    SELECT M.UserID, M.MessageID, M.NetworkID, N.Name AS NetworkName, M.Subjects, M.SubjectsCount, M.RepliesCount, M.LikesCount, M.CreatedDate, M.MessageText, M.HasLinkOrAttachment, M.Score, M.WebUrl AS MessageWebUrl, U.UserName AS PosterUserName,
    U.Name AS PosterName, U.FirstName AS PosterFirstName, U.ImageUrl AS PosterImageUrl, U.EmailAddress AS PosterEmailAddress, U.WebUrl AS PosterWebUrl, M.MessageSource, M.ImagePreviewUrl, M.LinkFileName, M.FileDownloadUrl, M.LinkType, M.SmallIconUrl
    FROM dbo.Message AS M INNER JOIN
    dbo.Network AS N ON M.NetworkID = N.NetworkID INNER JOIN
    dbo.[User] AS U ON M.UserID = U.UserID
    GO
The Network table has an index on NetworkID, but it is non-clustered; I don't think that is the culprit.
    Corby

I marked your response as answer because you gave me information I didn't have about the sort. I ended up rewriting the query to use a join instead of the In's and it improved dramatically: about one second on a very minimal Azure SQL database, where before it was 12 seconds on one network. We didn't notice the problem at all before we moved to Azure SQL; it was about one to three seconds at most.
    Here is the updated way that was much more efficient:
    CREATE PROCEDURE [dbo].[Procedure Name]
    -- Parameters
    @UserID int,
    @SourceMessageID int = 0
    AS
    BEGIN
    -- variable for @HomeNeworkUserID
    Declare @HomeNeworkUserID int
    -- Set the HomeNetworkID
    Set @HomeNeworkUserID = (Select HomeNetworkUserID From NetworkUser Where UserID = @UserID)
    -- SET NOCOUNT ON added to prevent extra result sets from
    -- interfering with SELECT statements.
    SET NOCOUNT ON
;With cteMessages As (
    -- Begin Select Statement
    Select (Fields List)
    -- Join to Network Table
From MessageView mv Inner Join NetworkUser nu on mv.NetworkID = nu.NetworkID -- Only Return Networks This User Has Selected
    Where nu.HomeNetworkUserID = @HomeNeworkUserID And AllowRecommendations = 1
    -- Do Not Return Any Messages Created By This User
    And mv.[UserID] != @UserID
    -- Do Not Return The MessageID
    And mv.[MessageID] != @SourceMessageID
), cteHistoryForThisUser As (
Select MessageID From MessageRecommendationHistory Where UserID = @UserID
)
    -- Begin Select Statement
    Select Top 40 (Fields List)
    -- Join to Network Table
    From cteMessages m Left Outer Join cteHistoryForThisUser h on m.MessageID = h.MessageID
    -- Do Not Return Any Items Where User Has Already been shown this Message
    Where h.MessageID Is Null
    -- An Order By Is Needed To Get The Best Content First
    Order By Score Desc
    END
    GO
    The Left Outer Join to test for null was the biggest improvement, but it also helped to join to the NetworkUser table instead of do the In sub query.
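For reference, this is the general shape of that anti-join rewrite, reduced to the two tables involved (illustrative only; it assumes it runs inside the procedure where @UserID is declared):

-- LEFT JOIN ... IS NULL instead of NOT IN (subquery): the optimizer can
-- usually turn this into an efficient anti-join, and it avoids the
-- NULL-handling surprises of NOT IN
Select m.MessageID
From MessageView m
Left Outer Join MessageRecommendationHistory h
  On h.MessageID = m.MessageID And h.UserID = @UserID
Where h.MessageID Is Null

NOT EXISTS is another common way to express the same thing and is often optimized identically.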

  • RE: Case 59063: performance issues w/ C TLIB and Forte3M

    Hi James,
    Could you give me a call, I am at my desk.
    I had meetings all day and couldn't respond to your calls earlier.
    -----Original Message-----
From: James Min [mailto:jmin@brio.forte.com]
    Sent: Thursday, March 30, 2000 2:50 PM
    To: Sharma, Sandeep; Pyatetskiy, Alexander
Cc: sophia@forte.com; kenl@forte.com; Tenerelli, Mike
    Subject: Re: Case 59063: performance issues w/ C TLIB and Forte 3M
    Hello,
    I just want to reiterate that we are very committed to working on
    this issue, and that our goal is to find out the root of the problem. But
    first I'd like to narrow down the avenues by process of elimination.
    Open Cursor is something that is commonly used in today's RDBMS. I
    know that you must test your query in ISQL using some kind of execute
    immediate, but Sybase should be able to handle an open cursor. I was
    wondering if your Sybase expert commented on the fact that the server is
    not responding to commonly used command like 'open cursor'. According to
    our developer, we are merely following the API from Sybase, and open cursor
    is not something that particularly slows down a query for several minutes
    (except maybe the very first time). The logs show that Forte is waiting for
    a status from the DB server. Actually, using prepared statements and open
    cursor ends up being more efficient in the long run.
    Some questions:
    1) Have you tried to do a prepared statement with open cursor in your ISQL
    session? If so, did it have the same slowness?
    2) How big is the table you are querying? How many rows are there? How many
    are returned?
    3) When there is a hang in Forte, is there disk-spinning or CPU usage in
    the database server side? On the Forte side? Absolutely no activity at all?
    We actually have a Sybase set-up here, and if you wish, we could test out
your database and Forte PEX here. Since your queries seem to be running
    off of only one table, this might be the best option, as we could look at
    everything here, in house. To do this:
    a) BCP out the data into a flat file. (character format to make it portable)
    b) we need a script to create the table and indexes.
    c) the Forte PEX file of the app to test this out.
d) the SQL statement that you issue in ISQL for comparison.
    If the situation warrants, we can give a concrete example of
    possible errors/bugs to a developer. Dial-in is still an option, but to be
    able to look at the TOOL code, database setup, etc. without the limitations
    of dial-up may be faster and more efficient. Please let me know if you can
    provide this, as well as the answers to the above questions, or if you have
    any questions.
    Regards,
    At 08:05 AM 3/30/00 -0500, Sharma, Sandeep wrote:
    James, Ken:
    FYI, see attached response from our Sybase expert, Dani Sasmita. She has
    already tried what you suggested and results are enclosed.
    ++
    Sandeep
    -----Original Message-----
    From: SASMITA, DANIAR
    Sent: Wednesday, March 29, 2000 6:43 PM
    To: Pyatetskiy, Alexander
    Cc: Sharma, Sandeep; Tenerelli, Mike
    Subject: Re: FW: Case 59063: Select using LIKE has performance
    issues
    w/ CTLIB and Forte 3M
    We did that trick already.
    When it is hanging, I can see what is doing.
    It is doing OPEN CURSOR. But not clear the exact statement of the cursor
    it is trying to open.
    When we run the query directly to Sybase, not using Forte, it is clearly
    not opening any cursor.
    And running it directly to Sybase many times, the response is always
    consistently fast.
    It is just when the query runs from Forte to Sybase, it opens a cursor.
    But again, in the Forte code, Alex is not using any cursor.
In trying to capture the query, we even tried to audit any statement coming to Sybase. Same thing, just open cursor. No cursor declaration anywhere.
==============================================
    James Min
    Technical Support Engineer - Forte Tools
    Sun Microsystems, Inc.
    1800 Harrison St., 17th Fl.
    Oakland, CA 94612
james.min@sun.com
    510.869.2056
    ==============================================
    Support Hotline: 510-451-5400
    CUSTOMERS open a NEW CASE with Technical Support:
    http://www.forte.com/support/case_entry.html
    CUSTOMERS view your cases and enter follow-up transactions:
    http://www.forte.com/support/view_calls.html


  • Performance issues with dynamic action (PL/SQL)

    Hi!
I'm having performance issues with a dynamic action that is triggered on a button click.
    I have 5 drop down lists to select columns which the users want to filter, 5 drop down lists to select an operation and 5 boxes to input values.
    After that, there is a filter button that just submits the page based on the selected filters.
    This part works fine, the data is filtered almost instantaneously.
After this, I have 3 column selectors and 3 boxes where users put the values they wish to update the filtered rows to.
There is an update button that calls the dynamic action (the procedure written below).
It should be straightforward; the only performance issue could be the decode section, because I need to cover the cases where the user wants to set a value to null (@) and where he wants to update fewer than 3 columns (he leaves '').
Hence P99_X_UC1 || ' = decode('  || P99_X_UV1 ||','''','|| P99_X_UC1  ||',''@'',null,'|| P99_X_UV1  ||')
    However when I finally click the update button, my browser freezes and nothing happens on the table.
    Can anyone help me solve this and improve the speed of the update?
    Regards,
    Ivan
    P.S. The code for the procedure is below:
    create or replace
    PROCEDURE DWP.PROC_UPD
    (P99_X_UC1 in VARCHAR2,
    P99_X_UV1 in VARCHAR2,
    P99_X_UC2 in VARCHAR2,
    P99_X_UV2 in VARCHAR2,
    P99_X_UC3 in VARCHAR2,
    P99_X_UV3 in VARCHAR2,
    P99_X_COL in VARCHAR2,
    P99_X_O in VARCHAR2,
    P99_X_V in VARCHAR2,
    P99_X_COL2 in VARCHAR2,
    P99_X_O2 in VARCHAR2,
    P99_X_V2 in VARCHAR2,
    P99_X_COL3 in VARCHAR2,
    P99_X_O3 in VARCHAR2,
    P99_X_V3 in VARCHAR2,
    P99_X_COL4 in VARCHAR2,
    P99_X_O4 in VARCHAR2,
    P99_X_V4 in VARCHAR2,
    P99_X_COL5 in VARCHAR2,
    P99_X_O5 in VARCHAR2,
    P99_X_V5 in VARCHAR2,
    P99_X_CD in VARCHAR2,
    P99_X_VD in VARCHAR2
    ) IS
    l_sql_stmt varchar2(32600);
    p_table_name varchar2(30) := 'DWP.IZV_SLOG_DET'; 
    BEGIN
    l_sql_stmt := 'update ' || p_table_name || ' set '
    || P99_X_UC1 || ' = decode('  || P99_X_UV1 ||','''','|| P99_X_UC1  ||',''@'',null,'|| P99_X_UV1  ||'),'
    || P99_X_UC2 || ' = decode('  || P99_X_UV2 ||','''','|| P99_X_UC2  ||',''@'',null,'|| P99_X_UV2  ||'),'
    || P99_X_UC3 || ' = decode('  || P99_X_UV3 ||','''','|| P99_X_UC3  ||',''@'',null,'|| P99_X_UV3  ||') where '||
    P99_X_COL  ||' '|| P99_X_O  ||' ' || P99_X_V  || ' and ' ||
    P99_X_COL2 ||' '|| P99_X_O2 ||' ' || P99_X_V2 || ' and ' ||
    P99_X_COL3 ||' '|| P99_X_O3 ||' ' || P99_X_V3 || ' and ' ||
    P99_X_COL4 ||' '|| P99_X_O4 ||' ' || P99_X_V4 || ' and ' ||
    P99_X_COL5 ||' '|| P99_X_O5 ||' ' || P99_X_V5 || ' and ' ||
    P99_X_CD   ||       ' = '         || P99_X_VD ;
    --dbms_output.put_line(l_sql_stmt); 
    EXECUTE IMMEDIATE l_sql_stmt;
    END;

    Hi Ivan,
    I do not think that the decode is performance relevant. Maybe the update hangs because some other transaction has uncommitted changes to one of the affected rows or the where clause is not selective enough and needs to update a huge amount of records.
    Besides that - and I might be wrong, because I only know some part of your app - the code here looks like you have a huge sql injection vulnerability here. Maybe you should consider re-writing your logic in static sql. If that is not possible, you should make sure that the user input only contains allowed values, e.g. by white-listing P99_X_On (i.e. make sure they only contain known values like '=', '<', ...), and by using dbms_assert.enquote_name/enquote_literal on the other P99_X_nnn parameters.
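As a rough sketch of that hardening (reduced to a single column/value pair; the real procedure would apply the same treatment to every P99_X_* parameter):

-- Hedged sketch: validate identifiers and quote literals before
-- concatenating them into dynamic SQL
declare
  l_col VARCHAR2(30)  := 'SOME_COLUMN';  -- would come from P99_X_UC1
  l_val VARCHAR2(100) := 'some value';   -- would come from P99_X_UV1
  l_sql_stmt varchar2(32600);
begin
  l_sql_stmt := 'update DWP.IZV_SLOG_DET set '
             || dbms_assert.simple_sql_name(l_col)  -- raises an error for anything but a plain identifier
             || ' = '
             || dbms_assert.enquote_literal(replace(l_val, '''', ''''''));
  execute immediate l_sql_stmt;
end;

The operators (P99_X_On) still need explicit white-listing, since they are neither identifiers nor literals.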
    Regards,
    Christian

  • Performance issues with pipelined table functions

I am testing pipelined table functions to be able to re-use the base_query function. Contrary to my understanding, the with_pipeline procedure runs 6 times slower than the legacy no_pipeline procedure. Am I missing something? The processor function is from "Improving Performance with Pipelined Table Functions" (http://www.oracle-developer.net/display.php?id=429).
Edit: The underlying query returns 500,000 rows in about 3 minutes. So there are no performance issues with the query itself.
    Many thanks in advance.
    CREATE OR REPLACE PACKAGE pipeline_example
    IS
       TYPE resultset_typ IS REF CURSOR;
       TYPE row_typ IS RECORD (colC VARCHAR2(200), colD VARCHAR2(200), colE VARCHAR2(200));
       TYPE table_typ IS TABLE OF row_typ;
       FUNCTION base_query (argA IN VARCHAR2, argB IN VARCHAR2)
          RETURN resultset_typ;
       c_default_limit   CONSTANT PLS_INTEGER := 100;  
       FUNCTION processor (
          p_source_data   IN resultset_typ,
          p_limit_size    IN PLS_INTEGER DEFAULT c_default_limit)
          RETURN table_typ
          PIPELINED
          PARALLEL_ENABLE(PARTITION p_source_data BY ANY);
       PROCEDURE with_pipeline (argA          IN     VARCHAR2,
                                argB          IN     VARCHAR2,
                                o_resultset      OUT resultset_typ);
       PROCEDURE no_pipeline (argA          IN     VARCHAR2,
                              argB          IN     VARCHAR2,
                              o_resultset      OUT resultset_typ);
    END pipeline_example;
    CREATE OR REPLACE PACKAGE BODY pipeline_example
    IS
       FUNCTION base_query (argA IN VARCHAR2, argB IN VARCHAR2)
          RETURN resultset_typ
       IS
          o_resultset   resultset_typ;
       BEGIN
          OPEN o_resultset FOR
             SELECT colC, colD, colE
               FROM some_table
              WHERE colA = ArgA AND colB = argB;
          RETURN o_resultset;
       END base_query;
       FUNCTION processor (
          p_source_data   IN resultset_typ,
          p_limit_size    IN PLS_INTEGER DEFAULT c_default_limit)
          RETURN table_typ
          PIPELINED
          PARALLEL_ENABLE(PARTITION p_source_data BY ANY)
       IS
          aa_source_data   table_typ;-- := table_typ ();
       BEGIN
          LOOP
             FETCH p_source_data
             BULK COLLECT INTO aa_source_data
             LIMIT p_limit_size;
             EXIT WHEN aa_source_data.COUNT = 0;
             /* Process the batch of (p_limit_size) records... */
             FOR i IN 1 .. aa_source_data.COUNT
             LOOP
                PIPE ROW (aa_source_data (i));
             END LOOP;
          END LOOP;
          CLOSE p_source_data;
          RETURN;
       END processor;
       PROCEDURE with_pipeline (argA          IN     VARCHAR2,
                                argB          IN     VARCHAR2,
                                o_resultset      OUT resultset_typ)
       IS
       BEGIN
          OPEN o_resultset FOR
               SELECT /*+ PARALLEL(t, 5) */ colC,
                      SUM (CASE WHEN colD > colE AND colE != '0' THEN colD / ColE END)de,
                      SUM (CASE WHEN colE > colD AND colD != '0' THEN colE / ColD END)ed,
                      SUM (CASE WHEN colD = colE AND colD != '0' THEN '1' END) de_one,
                      SUM (CASE WHEN colD = '0' OR colE = '0' THEN '0' END) de_zero
                 FROM TABLE (processor (base_query (argA, argB),100)) t
             GROUP BY colC
ORDER BY colC;
       END with_pipeline;
       PROCEDURE no_pipeline (argA          IN     VARCHAR2,
                              argB          IN     VARCHAR2,
                              o_resultset      OUT resultset_typ)
       IS
       BEGIN
          OPEN o_resultset FOR
               SELECT colC,
                      SUM (CASE WHEN colD > colE AND colE  != '0' THEN colD / ColE END)de,
                      SUM (CASE WHEN colE > colD AND colD  != '0' THEN colE / ColD END)ed,
                      SUM (CASE WHEN colD = colE AND colD  != '0' THEN 1 END) de_one,
                      SUM (CASE WHEN colD = '0' OR colE = '0' THEN '0' END) de_zero
                 FROM (SELECT colC, colD, colE
                         FROM some_table
                        WHERE colA = ArgA AND colB = argB)
             GROUP BY colC
             ORDER BY colC;
       END no_pipeline;
    END pipeline_example;
ALTER PACKAGE pipeline_example COMPILE;

Earthlink wrote:
"Contrary to my understanding, the with_pipeline procedure runs 6 times slower than the legacy no_pipeline procedure. Am I missing something?"
Well, we're missing a lot here.
    Like:
    - a database version
    - how did you test
    - what data do you have, how is it distributed, indexed
    and so on.
    If you want to find out what's going on then use a TRACE with wait events.
All necessary steps are explained in these threads:
    HOW TO: Post a SQL statement tuning request - template posting
    http://oracle-randolf.blogspot.com/2009/02/basic-sql-statement-performance.html
    Another nice one is RUNSTATS:
    http://asktom.oracle.com/pls/asktom/ASKTOM.download_file?p_file=6551378329289980701
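For completeness, a minimal hedged sketch of getting such a trace on your own session (10g or later; on older versions the equivalent is event 10046 at level 8 or 12):

-- Trace the current session including wait events and bind values,
-- run the slow code, then process the resulting trace file with tkprof
EXEC DBMS_MONITOR.SESSION_TRACE_ENABLE(waits => TRUE, binds => TRUE);
-- ... run with_pipeline / no_pipeline here ...
EXEC DBMS_MONITOR.SESSION_TRACE_DISABLE;

The wait events in the trace will show whether the time goes to I/O, to PL/SQL engine context switches, or elsewhere.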

  • Performance issues with Oracle EE 9.2.0.4 and RedHat 2.1

    Hello,
I am having some serious performance issues with Oracle Enterprise Edition 9.2.0.4 and RedHat Linux 2.1. The processor goes berserk at 100% for long periods of time (some 5 min.), and all the RAM gets used.
    Some environment characteristics:
    Machine: Intel Pentium IV 2.0GHz with 1GB of RAM.
    OS: RedHat Linux 2.1 Enterprise.
    Oracle: Oracle Enterprise Edition 9.2.0.4
    Application: We have a small web-application with 10 users (for now) and very basic queries (all in stored procedures). Also we use the latest version of ODP.NET with default connection settings (some low pooling, etc).
    Does anyone know what could be going on?
    Is anybody else having this similar behavior?
We changed from SQL Server, so we are not the world experts on the matter. But we want a reliable system nonetheless.
Please help us out; give us some tips, tricks, or guides…
    Thanks to all,
    Frank

Thank you very much, and sorry I couldn't write sooner. It seems that the administrator doesn't see much kswapd activity, so I don't really know what is going on.
We are looking at some queries and some indexing, but this is nuts. If I had some poor queries, which we don't really, the server would show a peak, right?
But it goes crazy and has two Oracle processes taking all the resources. There seems to be little swapping going on.
So now what? They are already talking about MS-SQL. Please help me out here, this is crazy!!!
We have maybe the most powerful combination here. What is Oracle doing?
We even killed the worker process of IIS so that no one was doing anything with the database, and still those two processes keep going.
Can someone help me?
    Thanks,
    Frank

  • Performance issue in Portal Reports

    Hi
We are experiencing a serious performance issue in a report and need an urgent fix for this issue.
The report is a Reports From SQL Query report. I need to find a way to dynamically create the where clause; otherwise I have to write the statement in a way that excludes the use of indexes.
A full table scan is not a valid option here; the number of records is simply too high (several millions; it's a data warehouse solution). In the Developer package we can build the where clause dynamically; this basic yet extremely important feature is essential to any database application.
We need to know how to do this, and if this functionality is not natively supported, then it should be one of the priority-one functionalities to implement in future releases.
    However, what do I do for now?
Thanks in advance

I have found a temporary workaround, by editing the where clause in the stored procedure manually. However, this fix has to be redone every time a change is committed in the wizard, so it is still not a solution to go with indefinitely, but it's OK for now.

  • Performance issue with pl/sql code

    Hi Oracle Gurus,
I am in need of your recommendations for a performance issue that I am facing in a production environment. There is a PL/SQL procedure which executes with a different elapsed time on different executions. Elapsed times are 30 minutes, 40 minutes, 65 minutes, 3 minutes, 3 seconds.
The expected elapsed time is a maximum of 3 minutes. (But sometimes it took only 3 seconds...!)
The output of all the different executions is the same, that is, deletion and insertion of 12K records into a table.
Here are the trace details for two different scenarios.
Slow execution - 33.65 minutes
    Stat Name                                Statement   Per Execution % Snap
    Elapsed Time (ms)                         1,712,343    1,712,342.6    41.4
    CPU Time (ms)                             1,679,689    1,679,688.6    44.7
    Executions                                        1            N/A     N/A
    Buffer Gets                              ##########  167,257,973.0    86.9
    Disk Reads                                    1,284        1,284.0     0.4
    Parse Calls                                       1            1.0     0.0
    User I/O Wait Time (ms)                       4,264            N/A     N/A
    Cluster Wait Time (ms)                        3,468            N/A     N/A
    Application Wait Time (ms)                        0            N/A     N/A
    Concurrency Wait Time (ms)                        6            N/A     N/A
    Invalidations                                     0            N/A     N/A
    Version Count                                     4            N/A     N/A
    Sharable Mem(KB)                                 85            N/A     N/A
          -------------------------------------------------------------
Fast Execution: 5 seconds
    Stat Name                                Statement   Per Execution % Snap
    Elapsed Time (ms)                            41,550       41,550.3     0.7
    CPU Time (ms)                                40,776       40,776.3     1.0
    Executions                                        1            N/A     N/A
    Buffer Gets                               2,995,677    2,995,677.0     4.2
    Disk Reads                                       22           22.0     0.0
    Parse Calls                                       1            1.0     0.0
    User I/O Wait Time (ms)                         162            N/A     N/A
    Cluster Wait Time (ms)                          621            N/A     N/A
    Application Wait Time (ms)                        0            N/A     N/A
    Concurrency Wait Time (ms)                       55            N/A     N/A
    Invalidations                                     0            N/A     N/A
    Version Count                                     4            N/A     N/A
    Sharable Mem(KB)                                 85            N/A     N/A
          -------------------------------------------------------------
For security reasons, I cannot share the actual code. It's report-generating code that deletes and loads the data into a table using insert-into-select statements.
    Delete from table ;
    cursor X to get the master data ( 98 records )
    For each X loop
    insert into tableA select * from tables where a= X.a and b= X.b and c=X.c ..... ;
    -- 12 K records inserted on average
    insert into tableB select * from tables where a= X.a and b= X.b and c=X.c ..... ;
    -- 12 K records inserted on average
end loop;
1. The select query is complex, with bind variables (the explain plan varies for each set of values).
    2. I have checked the tablespace of the tables involved, it is 82% used. DBA confirmed that it is not the reason.
    3. Disk reads are high during long execution.
4. During long-running executions, I can see a 'db file sequential read' wait event on an index object. This index is on the table where the data is inserted.
All I need to find out is why this code takes 3 seconds one time and 60 minutes another, on the same day and on consecutive executions.
Is there any other approach to find the root cause of this behaviour and to fix it? Kindly advise.
Thanks in advance for your help.
    Regards,
    Hari
    Edited by: BluShadow on 26-Sep-2012 08:24
edited to add {code} tags.  You've been a member long enough to know to do this yourself... so please do so in future.  ({message:id=9360002})

    Hariharan ST wrote:
    Hi Oracle Gurus,
I am in need of your recommendations for a performance issue that I am facing in a production environment. There is a PL/SQL procedure which executes with a different elapsed time on different executions.
Please re-edit your post and add some code tags around the trace information. This would improve readability greatly and will help us to help you. For example:
{code}
select * from dual;
{code}
Based upon your description I can imagine two things:
a) The execution plan for the select query changes frequently.
A typical reason can be out-of-date statistics.
b) Some locking / wait conflict, for example upon a UK index.
Are there any other operations going on while it is slow? If anybody else inserts a value, then your session will wait if the same (PK/UK) value is also to be inserted.
    Those wait events can be recognized using standard tools like oracle sql developer or enterprise manager while the query is slow.
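A quick hedged check along those lines, runnable from a second session while the procedure is slow (:slow_sid is the SID of the slow session; on 10g and later these columns are in v$session, on 9i use v$session_wait):

-- What is the slow session waiting on right now?
SELECT sid, event, state, seconds_in_wait
  FROM v$session
 WHERE sid = :slow_sid;

An enq: TX wait here would point to the locking scenario in (b); 'db file sequential read', as already observed, points more toward a plan change.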
Also go through the links that are in the FAQ. They tell you how to get better information for making a tuning request.
    SQL and PL/SQL FAQ

  • Performance issue using webelements and crystal

We have 2 reports named "DAMAGE_REPORTS" and "TDI_CHART".
DAMAGE_REPORTS
There are 13 dynamic or static prompts created using WebElements. After clicking on the submit button (a webelement) and selecting the prompt values, the TDI_CHART report opens below the "DAMAGE_REPORTS" report.
    TDI_CHART
TDI_CHART is created using free-hand SQL which contains 2 commands:
        1. Command SQL - created based on two categories:
              Trend - Trend By Month, Trend By Quarter, Trend By Week
              Non-Trend - Current Month, Current Year to Date, Last Year to Date, Current Quarter, Last Quarter, Current Week, Last Week, Last Month, Last Year Total
              The charts, cross-tabs and table information come from this query.
        2. Image count - contains the number-of-images information based on the prompt selection, and acts as an opendocument URL in the report to open the TDI_THUMBNAIL report, which displays the thumbnail images.
    Report information
The TDI_CHART report contains 8 charts, 4 cross-tabs, summary information shown in table format (detail section), and 2 opendocument links (1. for the image count, 2. export to Excel, which shows the detail information behind the summary table).
    Description
If the selection is a Non-Trend value from the Time Period parameter, the corresponding chart will be shown along with the table information. (Table information is only required for Non-Trend values.)
If the selection is a Trend value from the Time Period parameter, the corresponding cross-tab and chart will be shown and the others will be suppressed.
    Problem Area
After the prompt selection from "DAMAGE_REPORTS", the TDI_CHART report takes around 1 min 30 sec to show the chart and table or cross-tab.
When we execute the above queries directly in the database, they come back in 12 secs, whereas in the report it takes 1 min 30 sec. Can you guide us through some steps to bring down this time?

    hello Mufiza,
copy the url created at the step "After the prompt selection from 'Damage_Reports'".
paste this url into a new browser window and press enter.
what is the time to return this report?... this time should also be 1.5 minutes.
    it sounds like you are using 2 commands in the same report. this will often cause performance issues, just like linking two stored procedures together or linking two disparate datasources together.
    this would not be an issue related to webelements but related to the design of your target report.
    jw

  • Performance issue on query. Help needed.

This is mainly a performance issue, and I hope someone can help me with it.
Basically I have four tables: Master (150,000 records), Child1 (100,000+ records), Child2 (50 million records!), and Child3 (10,000+ records)
(please pardon the aliases).
    Now every record in master has more than one corresponding record in each of the child tables (one to many).
    Also there may not be any record in any or all of the tables for a particular master record.
Now, I need to fetch the max of last_updated_date for every master record in each of the 3 child tables, and then find the maximum of
the three last_updated_dates obtained from the 3 tables.
    eg: for Master ID 100, I need to query Child1 for all the records of Master ID 100 and get the max last_updated_date.
    Same for the other 2 tables and then get the maximum of these three values.
    (I also need to take care of cases where no record may be found in a child table for a Master ID)
Writing a procedure that uses cursors to fetch the value from each of the child tables hits performance
badly. And the thing is, I need to find the last_updated_date for every Master record (all 150,000 of them). It'll probably take days to do this.
    SELECT MAX (C1.LAST_UPDATED_DATE)
    ,MAX (C2.LAST_UPDATED_DATE)
    ,MAX (C3.LAST_UPDATED_DATE)
    FROM CHILD1 C1
    ,CHILD2 C2
    ,CHILD3 C3
    WHERE C1.MASTER_ID = 100
    OR C2.MASTER_ID = 100
    OR C3.MASTER_ID = 100
I tried the above but I got a temp tablespace error. I don't think the query is good enough at all.
(The OR clause is to take care of there being no records in any child table. With an AND, the join, and hence the select, would fail even if there were no record in one child table but valid values in the other 2 tables.)
    Thanks a lot.
    Edited by: user773489 on Dec 16, 2008 11:49 AM

    Not sure I understand the problem. The max you are getting from the above is already the greatest out of the three - that's why we do the UNION ALL.
Here's sample code with output; maybe this will clear it up:
    with a as (
    select 10 MASTER_ID, to_date('12/15/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
    select 20 MASTER_ID, to_date('12/01/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
select 30 MASTER_ID, to_date('12/02/2008', 'MM/DD/YYYY') LAST_DTE from dual),
    b as (
    select 10 MASTER_ID, to_date('12/14/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
    select 20 MASTER_ID, to_date('12/02/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
select 40 MASTER_ID, to_date('11/15/2008', 'MM/DD/YYYY') LAST_DTE from dual),
    c as (
    select 10 MASTER_ID, to_date('12/07/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
    select 30 MASTER_ID, to_date('11/29/2008', 'MM/DD/YYYY') LAST_DTE from dual UNION ALL
select 40 MASTER_ID, to_date('12/13/2008', 'MM/DD/YYYY') LAST_DTE from dual)
    select MASTER_ID, MAX(LAST_DTE)
    FROM
    (select MASTER_ID, LAST_DTE from a UNION ALL
    select MASTER_ID, LAST_DTE from b UNION ALL
    select MASTER_ID, LAST_DTE from c)
    group by MASTER_ID;
    MASTER_ID              MAX(LAST_DTE)            
    30                     02-DEC-08                
    40                     13-DEC-08                
    20                     02-DEC-08                
    10                     15-DEC-08                
4 rows selected

  • Tool for diagnosing performance issues in oracle database

Is there any tool to diagnose performance issues in queries and stored procedures in Oracle, similar to SQL Profiler for SQL Server?
    Thanks

you can use OEM (Oracle Enterprise Manager) to diagnose and monitor the database.
Chapter 10: Monitoring and Tuning the Database (refer to the link below; Oracle OBE series, step-by-step procedures with screenshot presentation).
This chapter introduces you to some of the monitoring and tuning operations as performed through Enterprise Manager.
    http://www.oracle.com/technology/obe/2day_dba/monitoring/monitoring.htm
    refer: Monitoring and Tuning the Database
    http://download.oracle.com/docs/cd/B14117_01/server.101/b10742/montune.htm
Hope this helps you.

  • Oracle Forms6i Query Performance issue - Urgent

    Hi All,
    I'm using oracle forms6i and Oracle DB 9i.
    I'm facing the performance issue in query forms.
    In detail block form taking long time to load the data.
    Form contains 2 non data blocks
    1.HDR - 3 input parameters
    2.DETAILS - Grid - Details
    HDR input fields
    1.Company Code
2.Company Account No
    3.Customer Name
The Details grid displays the details.
    Here there are 2 tables involved
1.Table1 - 1 crore (10 million) records
2.Table2 - 4 crore (40 million) records
In a form procedure, one cursor is built and fetched directly, and the values are assigned to the form block fields.
    Below i've pasted the query
    SELECT
    t1.entry_dt,
    t2.authoriser_code,
    t1.company_code,
t1.company_ac_no,
initcap(t1.customer_name) cust_name,
t2.agreement_no,
t1.customer_id
    FROM
    table1 t1,
    table2 t2
    WHERE
    (t2.trans_no = t1.trans_no or t2.temp_trans_no = t1.trans_no)
    AND t1.company_code = nvl(:hdr.l_company_code,t1.company_code)
    AND t1.company_ac_no = nvl(:hdr.l_company_ac_no,t1.company_ac_no)
    AND lower(t1.customer_name) LIKE lower(nvl('%'||:hdr.l_customer_name||'%' ,t1.customer_name))
    GROUP BY
    t2.authoriser_code,
    t1.company_code,
    t1.company_ac_no,
    t1.customer_name,
    t2.agreement_no,
    t1.customer_id;
Where Clause Analysis
1.Condition 1 - OR operator (two different columns in table2 are compared with one column in table1)
2.LIKE operator
3.All the columns have indexes, but they are not used properly; it is always a full table scan
4.NVL check
5.If I run the query in the back end it comes back a little faster; from the front end it is very slow
Input Parameter - query retrieval data limits
Only company code: record count will be 50 - 500 records
Company code and company ac number: record count will be 1 - 5
Company code, company ac number and customer name: record count will be 1 - 5 records
I have tried the following ways
1.Split the query using UNION (OR clause separated) - nested loops COST 850, nested loops COST 750 - index by rowid - cost is 160, index by rowid - cost is 152, full table access.................................
2.Dynamic SQL build - DBMS_SQL.DEFINE_COLUMN .....
3.Given only one input parameter - nested loops COST 780, nested loops COST 780 - index by rowid - cost is 148, index by rowid - cost is 152, full table access.................................
Still I'm facing the same issue.
    Please help me out on this.
    Thanks and Regards,
    Oracle1001
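
    A common fix for the NVL(:param, col) pattern above is to build the WHERE clause dynamically, so that only the parameters the user actually supplied appear as real predicates and the optimizer can use the matching index. A minimal server-side sketch, with hypothetical bind and procedure names and the tables from the post (in Forms 6i itself you would call a stored procedure like this, or use EXEC_SQL, since native dynamic SQL is not available client-side):
    CREATE OR REPLACE PROCEDURE query_details(
        p_company_code  IN  VARCHAR2,
        p_company_ac_no IN  VARCHAR2,
        p_result        OUT SYS_REFCURSOR) IS
        v_sql VARCHAR2(4000);
    BEGIN
        v_sql := 'SELECT t1.entry_dt, t1.company_code, t1.company_ac_no, t1.customer_id'
              || ' FROM table1 t1 WHERE 1 = 1';
        -- each bind is referenced exactly once, so the USING list never changes
        IF p_company_code IS NOT NULL THEN
            v_sql := v_sql || ' AND t1.company_code = :b1';   -- real predicate: index usable
        ELSE
            v_sql := v_sql || ' AND (1 = 1 OR :b1 IS NULL)';  -- inert predicate keeps the bind
        END IF;
        IF p_company_ac_no IS NOT NULL THEN
            v_sql := v_sql || ' AND t1.company_ac_no = :b2';
        ELSE
            v_sql := v_sql || ' AND (1 = 1 OR :b2 IS NULL)';
        END IF;
        OPEN p_result FOR v_sql USING p_company_code, p_company_ac_no;
    END;
    /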

    Sudhakar P wrote:
    the below query takes more than one minute while updating the records through Pro*C.
    Execute 562238 161.03 174.15 7 3932677 2274833 562238
    Hi Sudhakar,
    If the database is capable of executing 562,238 update statements in about a minute, then that's pretty good, don't you think?
    Your real problem is in the application code, which probably looks something like this in pseudocode:
    for i in (some set containing 562,238 rows)
    loop
      <your update statement with all the bind variables>
    end loop;
    If you transform your code to do a single update statement, you'll gain a lot of seconds (a sketch follows this post).
    Regards,
    Rob.
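
    A minimal sketch of that transformation, with hypothetical table and column names since the original statement isn't shown: the row-by-row loop collapses into one set-based statement, executed once instead of 562,238 times.
    -- one correlated update driven by the source set:
    UPDATE target_tab t
       SET t.status = 'PROCESSED'
     WHERE t.id IN (SELECT s.id FROM source_tab s WHERE s.batch_id = 42);
    -- or, when values must be copied across, a MERGE:
    MERGE INTO target_tab t
    USING (SELECT s.id, s.new_amount FROM source_tab s WHERE s.batch_id = 42) src
       ON (t.id = src.id)
     WHEN MATCHED THEN UPDATE SET t.amount = src.new_amount;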

  • Database performance issue (8.1.7.0)

    Hi,
    We have a tablespace "payin" in our database (8.1.7.0).
    It is the main tablespace of the database, it is dictionary managed, and it is heavily accessed by user SQL statements.
    We are now facing a database performance issue during peak time (i.e. at the month end), when many users run a number of large reports.
    We have also increased the SGA sufficiently, based on the RAM size.
    This tablespace is heavily accessed for the reports.
    Now my questions are:
    Is this performance issue caused by the tablespace being dictionary managed instead of locally managed?
    I ask because, when I monitor the sessions through OEM, the number of hard parses for the connected users is high, and it should actually be low.
    In Oracle 8.1.7.0, can we convert a dictionary-managed tablespace to a locally managed one?
    By doing so, will the problem get somewhat resolved? Will it reduce the overhead on the dictionary tables and on the shared memory?
    If yes, what is the procedure to convert the tablespace from dictionary to locally managed? (A conversion sketch follows this post.)
    With Regards
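
    On the conversion question: a minimal sketch, assuming the DBMS_SPACE_ADMIN package (available from 8.1.6 onward), which migrates a dictionary-managed tablespace to locally managed in place; the tablespace name comes from the post:
    -- run as a DBA user; test on a non-production copy first
    BEGIN
        DBMS_SPACE_ADMIN.TABLESPACE_MIGRATE_TO_LOCAL('PAYIN');
    END;
    /
    Note that the migration by itself will not reduce hard parses; those come from non-shared SQL (e.g. literals instead of binds) and an undersized shared pool, not from extent management.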

    If your end users are just running reports against this tablespace, I don't think the tablespace management mode (LMT/DMT) matters here. You should be more concerned about the TEMP tablespace (for heavy sort operations) and your shared pool size (since you have seen hard parses go up).
    As already stated, get Statspack running and also try tracing user sessions with wait events. That might give you more clues.
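
    A minimal sketch of the session tracing suggested above, using the 10046 event (valid on 8.1.7; the trace file is written to user_dump_dest and can be formatted with tkprof):
    -- trace the current session, including wait events (level 8)
    ALTER SESSION SET timed_statistics = TRUE;
    ALTER SESSION SET EVENTS '10046 trace name context forever, level 8';
    -- ...run the slow report query here...
    ALTER SESSION SET EVENTS '10046 trace name context off';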

  • Performance issue - in 4.7 (previously worked in ECC 6 only)

    Hi experts,
    I have worked on ECC 6.0, and now I have a new job working on 4.7 in a support project. I find it very difficult after working on 6.0.
    I had not used OCCURS 0, LIKE, etc. statements in 6.0.
    In 4.7, what are the things I should consider to avoid performance issues?
    Please guide me.
    Thanks in advance.

    Hi Sakthi,
    I faced the same problem; I have been in support for the last 3 months now.
    So let me share some of the tips and tricks I follow.
    In ECC 6.0 there is always less data, since the database may go back to 2005 but not earlier.
    But a 4.7 EE database may go back to 1997, like mine.
    Because the database is so huge, the SELECT statements you write will sometimes not finish in PRD and QAS:
    you will get "Request Timed Out".
    So you need to be more careful with SELECT statements: match as many primary key fields as possible, learn about secondary indexes, and get a good grip on the foreign key relations.
    About LIKE, OCCURS, etc., don't worry; that part is very easy, and where a modification is needed you can write the code almost exactly as in ECC 6.0.
    In the SAP Wiki there are performance tips and coding procedures. Look at those; they will be helpful.
    Thanks & regards,
    Dileep .C

  • Insert performance issue with Partitioned Table.....

    Hi All,
    I have a performance issue with a table which is partitioned: without the table being partitioned the insert ran in less time, but after partitioning it took nearly double.
    1) The table was created initially without any partition, and the insert below took only 27 minutes.
    Total Rec Inserted :- 2424233
    PL/SQL procedure successfully completed.
    Elapsed: 00:27:35.20
    2) Then I re-created the table with partitioning (range, yearly) and the same insert took 51 minutes (elapsed time shown below).
    Is there any way I can achieve better performance during insert on this partitioned table?
    [ Similarly, I have another table with 50 million records where the insert took 10 hrs without partitioning;
    with the table partitioned, it took 18 hours... ]
    SQL> select * from table(dbms_xplan.display);
    PLAN_TABLE_OUTPUT
    Plan hash value: 4195045590
    | Id | Operation | Name | Rows | Bytes |TempSpc| Cost (%CPU)| Time |
    | 0 | SELECT STATEMENT | | 643K| 34M| | 12917 (3)| 00:02:36 |
    |* 1 | HASH JOIN | | 643K| 34M| 2112K| 12917 (3)| 00:02:36 |
    | 2 | VIEW | index$_join$_001 | 69534 | 1290K| | 529 (3)| 00:00:07 |
    |* 3 | HASH JOIN | | | | | | |
    | 4 | INDEX FAST FULL SCAN| PK_ACCOUNT_MASTER_BASE | 69534 | 1290K| | 181 (3)| 00:00
    | 5 | INDEX FAST FULL SCAN| ACCOUNT_MASTER_BASE_IDX2 | 69534 | 1290K| | 474 (2)| 00:00
    | 6 | TABLE ACCESS FULL | TB_SISADMIN_BALANCE | 2424K| 87M| | 6413 (4)| 00:01:17 |
    Predicate Information (identified by operation id):
    1 - access("A"."VENDOR_ACCT_NBR"=SUBSTR("B"."ACCOUNT_NO",1,8) AND
    "A"."VENDOR_CD"="B"."COMPANY_NO")
    3 - access(ROWID=ROWID)
    Open C1;
    Loop
        Fetch C1 Bulk Collect Into C_Rectype Limit 10000;
        Exit When C_Rectype.Count = 0;  -- exit before the FORALL once nothing more is fetched
        -- individual record fields cannot be referenced inside FORALL (PLS-00436),
        -- so insert the whole record (assuming C_Rectype is a table of test%ROWTYPE)
        Forall I In 1..C_Rectype.Count
            Insert Into test Values C_Rectype(I);
        V_Rec := V_Rec + Nvl(C_Rectype.Count, 0);
        Commit;
        C_Rectype.Delete;
    End Loop;
    Close C1;
    End;
    Total Rec Inserted :- 2424233
    PL/SQL procedure successfully completed.
    Elapsed: 00:51:01.22
    Edited by: user520824 on Jul 16, 2010 9:16 AM

    I'm concerned about the view in step 2 and the index join in step 3. A composite index with both columns might eliminate the index join and result in fewer read operations.
    If you know beforehand which partition the data is going into, you can save a little processing by specifying the partition in the insert (which may not be a scalable long-term solution). I'm not 100% sure you can do this on inserts, but I know you can on selects.
    The APPEND hint won't help the way you are using it: an insert with a VALUES clause ignores it. Where it is effective, and where it should help you, is when you can do the insert as a single query: insert into ... select from. If you are using the loop to avoid filling up undo/rollback, you can bulk collect to batch the selects and commit accordingly, but don't commit more often than you have to, because frequent commits slow transactions down.
    I don't think there is a nologging hint :)
    So, try something like
    insert /*+ hints */ into ...
    Select
         A.Ing_Acct_Nbr, Currency_Symbol,
         Balance_Date, Company_No,
         Substr(Account_No,1,8) Account_No,
         Substr(Account_No,9,1) Typ_Cd,
         Substr(Account_No,10,1) Chk_Cd,
         Td_Balance, Sd_Balance,
         Sysdate, 'Sisadmin'
    From Ideaal_Cons.Tb_Account_Master_Base A,
         Ideaal_Staging.Tb_Sisadmin_Balance B
    Where A.Vendor_Acct_Nbr = Substr(B.Account_No,1,8)
      And A.Vendor_Cd = B.Company_No;
    Edited by: riedelme on Jul 16, 2010 7:42 AM
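
    Building on that reply, a minimal sketch of a direct-path, partition-targeted version (the target table and partition names here are hypothetical; partition-extended names are in fact valid on INSERT as well as SELECT, and APPEND is honored here because the insert uses a subquery rather than VALUES):
    INSERT /*+ APPEND */ INTO test PARTITION (p_2008)
    SELECT a.ing_acct_nbr,
           b.currency_symbol,
           b.balance_date
      FROM ideaal_cons.tb_account_master_base a,
           ideaal_staging.tb_sisadmin_balance b
     WHERE a.vendor_acct_nbr = SUBSTR(b.account_no, 1, 8)
       AND a.vendor_cd       = b.company_no
       AND b.balance_date   >= DATE '2008-01-01'   -- rows must belong to the named partition
       AND b.balance_date   <  DATE '2009-01-01';
    COMMIT;  -- direct-path inserted data cannot be read in the same transaction until commit
    Direct-path inserts load above the high-water mark and bypass most undo generation; combined with a NOLOGGING table (there is indeed no nologging hint) the redo cost drops too, at the price of recoverability until the next backup.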
