Changed query to include new table

I want to include a new table, recv_pmt_woff_recov, in this query and do not want to change the SELECT column list.
The table recv_pmt_woff_recov is similar in terms of data to txn_settlement.
The join columns for the new table are liq_txn_ref_num and liq_txn_seq_num.
I want to do a UNION ALL with txn_settlement, and the select list should not change.
The query should also use the indexes; no full table scan should be performed.
Assume the existing join columns are indexed and that an index is also present on (liq_txn_ref_num, liq_txn_seq_num).
Data may or may not be present in this new table.
Please suggest an optimized query for this.
The existing query is below:
SELECT BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM, BILL_DETAILS_CURR_HIST.BILL_REF_NUM, BILL_DETAILS_CURR_HIST.TIMESTAMP,
TXN_SETTLEMENT.SETL_STATUS, TXN_SETTLEMENT.SETL_DATE, TXN_SETTLEMENT.CHARGE_TYPE, TXN_SETTLEMENT.CHARGE_AMT, TXN_SETTLEMENT.CUST_NUM,
TXN_SETTLEMENT.FX_CONTRACT_AMT, TXN_SETTLEMENT.SERIAL_NUM, TXN_SETTLEMENT.WHEN_TO_LIQ, TXN_SETTLEMENT.PARTY_TO_CHARGE, TXN_SETTLEMENT.DR_CR_IND,
TXN_SETTLEMENT.SETL_MTHD, TXN_SETTLEMENT.CONS_POST_IND, TXN_SETTLEMENT.COLLECTION_TYPE, TXN_SETTLEMENT.COLLECTION_DAY, TXN_SETTLEMENT.LIQUIDATE_DATE,
TXN_SETTLEMENT.SETL_CCY, TXN_SETTLEMENT.SETL_AMT, TXN_SETTLEMENT.FLG_ADVICE_ADVANCE, TXN_SETTLEMENT.BANK_CLRNG_HOUSE_ID, TXN_SETTLEMENT.DATE_PAID,
TXN_SETTLEMENT.FX_CONTRACT_EXPIRY_DATE, TXN_SETTLEMENT.FX_CONTRACT_RATE, TXN_SETTLEMENT.AMEND_ACTION, TXN_SETTLEMENT.CHARGE_CCY_CODE,
TXN_SETTLEMENT.MIS_SPREAD, TXN_SETTLEMENT.INVERSE_EXCHANGE_RATE_FLAG,
TRANSACTIONS.COUNTRY_CODE, TRANSACTIONS.LEG_VEH, MAP_SCHEMA_USAGE.SCHEMA_NAME, TRANSACTIONS.LEG_VEH_STATE, TRANSACTIONS.COUNTRY_STATE
FROM
TXN_SETTLEMENT, TRANSACTIONS, MAP_SCHEMA_USAGE, BILL_DETAILS_CURR_HIST
WHERE
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM > ( SELECT LAST_SEQUENCE FROM MAP_TIMESTAMP,MAP_SCHEMA_USAGE
WHERE MAPPING_NAME = 'map_trims_bill_curr_r2_settlement' AND
MAP_TIMESTAMP.SCHEMA_NAME = MAP_SCHEMA_USAGE.SCHEMA_NAME )
AND
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM <=
( SELECT LAST_SEQUENCE FROM MAP_TIMESTAMP,MAP_SCHEMA_USAGE WHERE MAPPING_NAME ='map_trims_bill_curr_r2_doc_boe_rec' AND
MAP_TIMESTAMP.SCHEMA_NAME = MAP_SCHEMA_USAGE.SCHEMA_NAME )
AND
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'FIN'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'PAY'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'LIQ'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'OST'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'CLS'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
AND TXN_SETTLEMENT.SEQ_NUM = '000'
AND TRANSACTIONS.SEQ_NUM = '000'
AND
TXN_SETTLEMENT.REF_NUM =
BILL_DETAILS_CURR_HIST.BILL_REF_NUM AND
TRANSACTIONS.REF_NUM = BILL_DETAILS_CURR_HIST.BILL_REF_NUM AND
TRANSACTIONS.REF_NUM = TXN_SETTLEMENT.REF_NUM AND TRANSACTIONS.SEQ_NUM = TXN_SETTLEMENT.SEQ_NUM
ORDER BY
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM, BILL_DETAILS_CURR_HIST.BILL_REF_NUM

The query below uses an inline view aliased TXN_SETTLEMENT.
I would like to establish a join with another table, bill_details_curr_hist, which is in the FROM clause.
But an "invalid identifier" error is raised.
How can I achieve a correlated join between the inline view and another table in the FROM clause?
The existing query is below:
SELECT BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM,
BILL_DETAILS_CURR_HIST.BILL_REF_NUM,
BILL_DETAILS_CURR_HIST.TIMESTAMP,
TXN_SETTLEMENT.SETL_STATUS, TXN_SETTLEMENT.SETL_DATE,
TXN_SETTLEMENT.CHARGE_TYPE,
TXN_SETTLEMENT.CHARGE_AMT, TXN_SETTLEMENT.CUST_NUM,
TXN_SETTLEMENT.FX_CONTRACT_AMT,
TXN_SETTLEMENT.SERIAL_NUM,
TXN_SETTLEMENT.WHEN_TO_LIQ,
TXN_SETTLEMENT.PARTY_TO_CHARGE,
TXN_SETTLEMENT.DR_CR_IND, TXN_SETTLEMENT.SETL_MTHD,
TXN_SETTLEMENT.CONS_POST_IND,
TXN_SETTLEMENT.COLLECTION_TYPE,
TXN_SETTLEMENT.COLLECTION_DAY,
TXN_SETTLEMENT.LIQUIDATE_DATE,
TXN_SETTLEMENT.SETL_CCY, TXN_SETTLEMENT.SETL_AMT,
TXN_SETTLEMENT.FLG_ADVICE_ADVANCE,
TXN_SETTLEMENT.BANK_CLRNG_HOUSE_ID,
TXN_SETTLEMENT.DATE_PAID,
TXN_SETTLEMENT.FX_CONTRACT_EXPIRY_DATE,
TXN_SETTLEMENT.FX_CONTRACT_RATE,
TXN_SETTLEMENT.AMEND_ACTION,
TXN_SETTLEMENT.CHARGE_CCY_CODE,
TXN_SETTLEMENT.MIS_SPREAD,
TXN_SETTLEMENT.INVERSE_EXCHANGE_RATE_FLAG,
TRANSACTIONS.COUNTRY_CODE, TRANSACTIONS.LEG_VEH,
MAP_SCHEMA_USAGE.SCHEMA_NAME,
TRANSACTIONS.LEG_VEH_STATE,
TRANSACTIONS.COUNTRY_STATE
FROM
TRANSACTIONS, MAP_SCHEMA_USAGE,
BILL_DETAILS_CURR_HIST ,(
select REF_NUM, SEQ_NUM, SETL_STATUS, SETL_DATE, CHARGE_TYPE, CHARGE_AMT, CUST_NUM, FX_CONTRACT_AMT,
       SERIAL_NUM, WHEN_TO_LIQ, PARTY_TO_CHARGE, DR_CR_IND, SETL_MTHD, CONS_POST_IND, COLLECTION_TYPE,
       COLLECTION_DAY, LIQUIDATE_DATE, SETL_CCY, SETL_AMT, FLG_ADVICE_ADVANCE, BANK_CLRNG_HOUSE_ID,
       DATE_PAID, FX_CONTRACT_EXPIRY_DATE, FX_CONTRACT_RATE, AMEND_ACTION, CHARGE_CCY_CODE, MIS_SPREAD,
       INVERSE_EXCHANGE_RATE_FLAG
from txn_settlement a
where charge_type not like 'RL%'
and a.ref_num = BILL_DETAILS_CURR_HIST.BILL_REF_NUM
and a.seq_num = '000'
union all
select LIQ_TXN_REF_NUM REF_NUM, LIQ_TXN_SEQ_NUM SEQ_NUM, null SETL_STATUS, null SETL_DATE, CHARGE_TYPE,
       posting_amt CHARGE_AMT, recv_pmt_woff_recov.CUST_NUM CUST_NUM, null FX_CONTRACT_AMT,
       pmt_serial_num SERIAL_NUM, WHEN_TO_LIQ, party_type PARTY_TO_CHARGE, 'D' DR_CR_IND, null SETL_MTHD,
       null CONS_POST_IND, null COLLECTION_TYPE, null COLLECTION_DAY, null LIQUIDATE_DATE, SETL_CCY,
       recv_pmt_woff_recov.SETL_AMT SETL_AMT, null FLG_ADVICE_ADVANCE, null BANK_CLRNG_HOUSE_ID,
       null DATE_PAID, null FX_CONTRACT_EXPIRY_DATE, chg_setl_fx_rate FX_CONTRACT_RATE, null AMEND_ACTION,
       posting_ccy CHARGE_CCY_CODE, null MIS_SPREAD, null INVERSE_EXCHANGE_RATE_FLAG
from recv_pmt_woff_recov,acct_recv
where recv_pmt_woff_recov.link_num = acct_recv.link_num
and recv_pmt_woff_recov.liq_txn_ref_num = BILL_DETAILS_CURR_HIST.BILL_REF_NUM
and recv_pmt_woff_recov.liq_txn_seq_num = '000'
) TXN_SETTLEMENT
WHERE
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM > ( SELECT
LAST_SEQUENCE FROM MAP_TIMESTAMP,MAP_SCHEMA_USAGE
WHERE MAPPING_NAME =
'map_trims_bill_curr_r2_settlement' AND
MAP_TIMESTAMP.SCHEMA_NAME =
MAP_SCHEMA_USAGE.SCHEMA_NAME )
AND
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM <=
( SELECT LAST_SEQUENCE FROM
MAP_TIMESTAMP,MAP_SCHEMA_USAGE WHERE MAPPING_NAME
='map_trims_bill_curr_r2_doc_boe_rec' AND
MAP_TIMESTAMP.SCHEMA_NAME =
MAP_SCHEMA_USAGE.SCHEMA_NAME )
AND
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'FIN'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'PAY'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'LIQ'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'OST'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
OR
BILL_DETAILS_CURR_HIST.OLD_BILL_STATUS = 'IPR'
AND BILL_DETAILS_CURR_HIST.BILL_STATUS = 'CLS'
AND TXN_SETTLEMENT.WHEN_TO_LIQ != 'INI'
AND TXN_SETTLEMENT.SEQ_NUM = '000'
AND TRANSACTIONS.SEQ_NUM = '000'
AND
TXN_SETTLEMENT.REF_NUM =
BILL_DETAILS_CURR_HIST.BILL_REF_NUM AND
TRANSACTIONS.REF_NUM =
BILL_DETAILS_CURR_HIST.BILL_REF_NUM AND
TRANSACTIONS.REF_NUM = TXN_SETTLEMENT.REF_NUM AND
TRANSACTIONS.SEQ_NUM = TXN_SETTLEMENT.SEQ_NUM
ORDER BY
BILL_DETAILS_CURR_HIST.UPDATE_SERIAL_NUM,
BILL_DETAILS_CURR_HIST.BILL_REF_NUM
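
For reference, a hedged sketch of the usual fix. Prior to Oracle 12c, an inline view cannot reference columns of another table in the same FROM clause, which is why BILL_DETAILS_CURR_HIST raises ORA-00904 (invalid identifier) inside the view. Remove the correlated predicates from the view and apply the join outside it instead; with join predicate push-down the optimizer can still drive each UNION ALL branch through the indexes on (REF_NUM, SEQ_NUM) and (LIQ_TXN_REF_NUM, LIQ_TXN_SEQ_NUM), which you can verify by looking for VIEW PUSHED PREDICATE in the plan. On 12c and later, a LATERAL inline view would make the correlation legal as written. A trimmed sketch of the pre-12c form (only a few of the posted columns are shown; the rest follow the same pattern):

SELECT b.update_serial_num,
       b.bill_ref_num,
       s.setl_status,
       s.when_to_liq
FROM   bill_details_curr_hist b,
       ( SELECT a.ref_num, a.seq_num, a.setl_status, a.when_to_liq
         FROM   txn_settlement a
         WHERE  a.charge_type NOT LIKE 'RL%'
         AND    a.seq_num = '000'
         UNION ALL
         SELECT r.liq_txn_ref_num, r.liq_txn_seq_num, NULL, r.when_to_liq
         FROM   recv_pmt_woff_recov r, acct_recv v
         WHERE  r.link_num = v.link_num
         AND    r.liq_txn_seq_num = '000'
       ) s
WHERE  s.ref_num = b.bill_ref_num
AND    b.old_bill_status = 'IPR'
AND    s.when_to_liq != 'INI'
ORDER BY b.update_serial_num, b.bill_ref_num;

Since UNION ALL simply returns zero rows from an empty branch, it does not matter whether recv_pmt_woff_recov contains data. Note also that the OR'ed status blocks in the posted WHERE clause probably need parentheses: AND binds tighter than OR, so as written the join predicates apply only to the last ('CLS') branch.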

Similar Messages

  • How to create an explain plan with rowsource statistics for a complex query that includes multiple table joins?

    1. How to create an explain plan with rowsource statistics for a complex query that includes multiple table joins?
    When multiple tables are involved, the actual number of rows returned is more than what the explain plan estimates. How can I find out what change is needed in the plan?
    2. Does row source statistics give some kind of understanding of extended stats?

    You can get Row Source Statistics only *after* the SQL has been executed; an Explain Plan by itself cannot give you row source statistics.
    To get row source statistics, either set STATISTICS_LEVEL='ALL' in the session that executes the SQL, or use the hint "gather_plan_statistics" in the SQL being executed.
    Then use dbms_xplan.display_cursor
    Hemant K Chitale
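    A minimal sketch of the hint-based approach (the table and column names here are placeholders):
    SELECT /*+ gather_plan_statistics */ COUNT(*)
      FROM some_table
     WHERE some_col = 10;
    -- in the same session, immediately afterwards:
    SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_CURSOR(NULL, NULL, 'ALLSTATS LAST'));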

  • V/LB - Change Pricing Report, Add new table

    Hi Experts,
    I am trying to add a new pricing table to a price report via transaction V/LB. When I select the report to change and hit the Table SAP button, there is no new-entry or change button available, only the delete function. Can you please advise how I can add a new table to the price report in V/LB?
    I have searched the database and a similar question was asked previously, but there is no reply (see the link below). Can you please assist with how to add a new condition table to the report?
    New Condition Table in V/LB
    Thank you

    Hi
    It is disabled in change mode (V/LB); you can only select the tables in V/LA.
    thank you

  • How to adjust adhoc query when the database table changed.

    Dear Friends,
        I have created an ad hoc query; however, after a few days there was a requirement to remove some fields and change the length of some fields. Now the ad hoc query is flagged as needing adjustment, but when I try to adjust it nothing happens. Could anyone let me know how to adjust the ad hoc query to the new table structure?
    Regards
    madhu.

    Hi Sumit,
    Yes, I have adjusted the database from SE11 itself by going into Utility > Adjust Database. However, when I try to go into change mode, the ad hoc query/InfoSet still asks me "the database table has been changed, do you want to adjust?".
    I have adjusted by going to More Functions, but still there is no change. Please let me know what exactly I have to do.
    regards
    madhu

  • Query on Custom schema table getting 'Do You want to save changes' message

    Hi all,
    I am getting the error message whenever I query the custom table. Here is a description of the issue.
    1. I have a button on the first block. When the user clicks this button, execute_query is called on another block. The user then gets the message "do you want to save the changes?", although no changes have been made on the form.
    2. The second block is based on a custom table defined in the custom schema.
    3. Created a public synonym for this table and also gave all the grants on it to the apps schema.
    4. This error did not occur before, when the second block was based on an apps schema table.
    Any idea on how to fix this.
    Thanks

    I doubt the problem is related to a schema or public synonym. The message is appearing because the value for an item is being changed after your query executes.
    First, check that the blocks that are not based on a table have the Database Data Block property set to No.
    If that doesn't solve the problem, then run the debugger and watch to see what item is changing. You may be doing something in a trigger.

  • How to include a table multiple times in InfoSet Query

    Hi,
    Is it possible to include a table more than once in an InfoSet Query? I created an InfoSet Query and included Table A, then included Table B. Now I want to join columns 1 and 3 of Table A with column 1 of Table B. In other words, Table B is a master data table and Table A is a transaction data table. How can this be achieved?
    Thanks
    Akila. R

    You cannot achieve this.
    Instead, compound the column 1 and column 3 InfoObjects and create the join with Table B.
    This will result in the same intended functionality.

  • Error while creating new table

    Hi
    First I deleted one custom table. Then I tried to create the same table with different fields. Now I am getting these errors:
    1. ZLV_COMP_TABLE: Inconsistency DD <-> DB (check table with analysis tool)
    2. A table called ZLV_COMP_TABLE exists in the database
    3. No active nametab exists for ZLV_COMP_TABLE
    4. Termination due to inconsistencies
    5. Table ZLV_COMP_TABLE (Statements could not be generated)
    6. Error number in DD_DECIDE (9)
    Please help me: how can I create a table with the same name?
    thanks
    Subhankar

    Hello,
    Go to SE14 and give the table name,
    select the Tables radio button,
    click on Edit,
    check the Delete Data radio button,
    and click on ACTIVATE AND ADJUST DATABASE.
    Now check the table in SE11.
    If it still exists, you can change the same table, or delete it again and create a new table with the same name.
    This might help your query.
    Anil.

  • New table in Report painter

    Dear All,
    Can someone guide me on how to include the new table FAGLFLEXT in Report Painter reports?
    Thanks and Regards,
    Gokul.

    Hi,
    use KE5B to make the FSV nodes available in Report painter reports. This function can be used to create/change sets based on FSV.
    Best regards, Christian

  • I don't have the option to create a new table in my database

    Hi, I have SQL Server 2014. A few weeks ago I was able to create new tables in my databases; now, for some reason, I don't have the option to do so. All I get when I right-click on Tables is "Tables..." instead of "New Table". When I select "Tables..." it lets me write my code, but it only saves to a text file, which won't open when I want to resume my work.
    Is there any option in SQL Server to activate "New Table" when right-clicking on Tables? I have all the rights and permissions available, as it's my own computer and my own server.
    To fix the issue I tried:
    - Reinstalling SQL Server
    - Changing to 4 different versions
    - Allowing more than the necessary permissions
    - Making it public to use (it's only for testing anyway; I'm in the learning phase)
    Any kind of help or advice will be welcome and much appreciated.
    Thank you!

    I don't know why you are experiencing this, so what I am describing below are just some steps you can take to diagnose the problem, in case it is related to permissions.
    1. If you are not already on the computer where SQL Server is installed, log in on that box, launch SQL Server Management Studio and run it as Administrator.
    2. Connect to the server, open up Object Explorer and expand the Security node that is directly under the Server tab.
    3. Under Logins, find your login, right-click, select Properties, and see what kind of server-level permissions you have, which databases you are mapped to, and what kind of permissions you have on those.
    It may be that this is not related to permissions at all, but I can't think of what else it would be. To test whether it is an SSMS GUI problem or not, open up a query window and select a database on which you think you have permissions to create tables.
    Then create a test table using T-SQL, for example:
    create table dbo.MyTestTable( col1 int);
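    If you'd rather check from a query window than the GUI, the built-in fn_my_permissions function lists your effective permissions (a sketch; run it in the database in question):
    -- server-level permissions for the current login
    SELECT * FROM fn_my_permissions(NULL, 'SERVER');
    -- database-level permissions in the current database
    SELECT * FROM fn_my_permissions(NULL, 'DATABASE');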

  • Trying to add a new table to report.

    I have a report tied to a database to which I have added a new table. When I add the table, link the required fields, and then run the report, I get this error:
    "Query Engine Error: If Tables are already linked then the join type cannot change"
    Can someone point me in the right direction to get this resolved?
    Thanks,

    Hi,
    This error generally occurs when the database schema (e.g., a field name) differs from what the report was designed against.
    Hope this information helps.
    Regards,
    Sumit Kanhe

  • Select query failing on a table that has heavy per-second insertions

    Hi
    Problem statement
    1- We are using 11g as the database.
    2- We have a table that is range-partitioned on a date column.
    3- The insertion rate is very high, i.e. several hundred records per second into the current partition.
    4- Data goes continuously into the current partition, whenever the buffer is full or the per-second timer expires.
    5- We also have to run select queries on the same table, on the current partition, say for the latest 500 records.
    6- Efficient indexes are also created on the table.
    Solutions tried:
    1- After analyzing with tkprof, it is observed that parse and execute work fine, but the fetch takes too much time to produce the output, say 1 hour.
    2- Using the 11g SQL advisor and SPM, several baselines were created, but their success rate is also too low.
    Please suggest any solution to this issue:
    1- e.g. redesign of the table,
    2- any better way to query to fix the fetch issue,
    3- any Oracle settings or parameter changes to fix the fetch issue.
    Thanks in advance.
    Regards
    Vishal Sharma

    I am uploading the latest stats; please let me know how I can improve this, as it is taking 25 minutes.
    ####TKPROF output#########
    SQL ID : 2j5w6bv437cak
    select almevttbl.AlmEvtId, almevttbl.AlmType, almevttbl.ComponentId,
      almevttbl.TimeStamp, almevttbl.Severity, almevttbl.State,
      almevttbl.Category, almevttbl.CauseCode, almevttbl.UnitType,
      almevttbl.UnitId, almevttbl.UnitName, almevttbl.ServerName,
      almevttbl.StrParam, almevttbl.ExtraStrParam, almevttbl.ExtraStrParam2,
      almevttbl.ExtraStrParam3, almevttbl.ParentCustId, almevttbl.ExtraParam1,
      almevttbl.ExtraParam2, almevttbl.ExtraParam3,almevttbl.ExtraParam4,
      almevttbl.ExtraParam5, almevttbl.SRCIPADDRFAMILY,almevttbl.SrcIPAddress11,
      almevttbl.SrcIPAddress12,almevttbl.SrcIPAddress13,almevttbl.SrcIPAddress14,
      almevttbl.DESTIPADDRFAMILY,almevttbl.DestIPAddress11,
      almevttbl.DestIPAddress12,almevttbl.DestIPAddress13,
      almevttbl.DestIPAddress14,  almevttbl.DestPort, almevttbl.SrcPort,
      almevttbl.SessionDir, almevttbl.CustomerId, almevttbl.ProfileId,
      almevttbl.ParentProfileId, almevttbl.CustomerName, almevttbl.AttkDir,
      almevttbl.SubCategory, almevttbl.RiskCategory, almevttbl.AssetValue,
      almevttbl.IPSAction, almevttbl.l4Protocol,almevttbl.ExtraStrParam4 ,
      almevttbl.ExtraStrParam5,almevttbl.username,almevttbl.ExtraStrParam6,
      IpAddrFamily1,IPAddrValue11,IPAddrValue12,IPAddrValue13,IPAddrValue14,
      IpAddrFamily2,IPAddrValue21,IPAddrValue22,IPAddrValue23,IPAddrValue24
    FROM
           AlmEvtTbl PARTITION(ALMEVTTBLP20100323) WHERE AlmEvtId IN ( SELECT  * FROM
      ( SELECT /*+ FIRST_ROWS(1000) INDEX (AlmEvtTbl AlmEvtTbl_Index) */AlmEvtId
      FROM AlmEvtTbl PARTITION(ALMEVTTBLP20100323) where       ((AlmEvtTbl.Customerid
      = 0 or AlmEvtTbl.ParentCustId = 0))  ORDER BY AlmEvtTbl.TIMESTAMP DESC) 
      WHERE ROWNUM  <  602) order by timestamp desc
    call     count       cpu    elapsed       disk      query    current        rows
    Parse        1      0.10       0.17          0          0          0           0
    Execute      1      0.00       0.00          0          0          0           0
    Fetch       42   1348.25    1521.24       1956   39029545          0         601
    total       44   1348.35    1521.41       1956   39029545          0         601
    Misses in library cache during parse: 1
    Optimizer mode: FIRST_ROWS
    Parsing user id: 82 
    Rows     Row Source Operation
        601  PARTITION RANGE SINGLE PARTITION: 24 24 (cr=39029545 pr=1956 pw=1956 time=11043 us cost=0 size=7426 card=1)
        601   TABLE ACCESS BY LOCAL INDEX ROWID ALMEVTTBL PARTITION: 24 24 (cr=39029545 pr=1956 pw=1956 time=11030 us cost=0 size=7426 card=1)
        601    INDEX FULL SCAN ALMEVTTBL_INDEX PARTITION: 24 24 (cr=39029377 pr=1956 pw=1956 time=11183 us cost=0 size=0 card=1)(object id 72557)
        601     FILTER  (cr=39027139 pr=0 pw=0 time=0 us)
    169965204      COUNT STOPKEY (cr=39027139 pr=0 pw=0 time=24859073 us)
    169965204       VIEW  (cr=39027139 pr=0 pw=0 time=17070717 us cost=0 size=13 card=1)
    169965204        PARTITION RANGE SINGLE PARTITION: 24 24 (cr=39027139 pr=0 pw=0 time=13527031 us cost=0 size=48 card=1)
    169965204         TABLE ACCESS BY LOCAL INDEX ROWID ALMEVTTBL PARTITION: 24 24 (cr=39027139 pr=0 pw=0 time=10299895 us cost=0 size=48 card=1)
    169965204          INDEX FULL SCAN ALMEVTTBL_INDEX PARTITION: 24 24 (cr=1131414 pr=0 pw=0 time=3222624 us cost=0 size=0 card=1)(object id 72557)
    Elapsed times include waiting on following events:
      Event waited on                             Times   Max. Wait  Total Waited
      ----------------------------------------   Waited  ----------  ------------
      SQL*Net message to client                      42        0.00          0.00
      SQL*Net message from client                    42       11.54        133.54
      db file sequential read                      1956        0.20         28.00
      latch free                                     21        0.00          0.01
      latch: cache buffers chains                     9        0.01          0.02
    SQL ID : 0ushr863b7z39
    SELECT /* OPT_DYN_SAMP */ /*+ ALL_ROWS IGNORE_WHERE_CLAUSE
      NO_PARALLEL(SAMPLESUB) opt_param('parallel_execution_enabled', 'false')
      NO_PARALLEL_INDEX(SAMPLESUB) NO_SQL_TUNE */ NVL(SUM(C1),0), NVL(SUM(C2),0)
    FROM
    (SELECT /*+ IGNORE_WHERE_CLAUSE NO_PARALLEL("PLAN_TABLE") FULL("PLAN_TABLE")
      NO_PARALLEL_INDEX("PLAN_TABLE") */ 1 AS C1, CASE WHEN
      "PLAN_TABLE"."STATEMENT_ID"=:B1 THEN 1 ELSE 0 END AS C2 FROM
      "SYS"."PLAN_TABLE$" "PLAN_TABLE") SAMPLESUB
    call     count       cpu    elapsed       disk      query    current        rows
    Parse        1      0.00       0.00          0          0          0           0
    Execute      1      0.00       0.00          0          0          0           0
    Fetch        1      0.00       0.01          1          3          0           1
    total        3      0.00       0.01          1          3          0           1
    Misses in library cache during parse: 1
    Misses in library cache during execute: 1
    Optimizer mode: ALL_ROWS
    Parsing user id: 82     (recursive depth: 1)
    Rows     Row Source Operation
          1  SORT AGGREGATE (cr=3 pr=1 pw=1 time=0 us)
          0   TABLE ACCESS FULL PLAN_TABLE$ (cr=3 pr=1 pw=1 time=0 us cost=29 size=138856 card=8168)
    Elapsed times include waiting on following events:
      Event waited on                             Times   Max. Wait  Total Waited
      ----------------------------------------   Waited  ----------  ------------
      db file sequential read                         1        0.01          0.01
    SQL ID : bjkdb51at8dnb
    EXPLAIN PLAN SET STATEMENT_ID='PLUS30350011' FOR select almevttbl.AlmEvtId,
      almevttbl.AlmType, almevttbl.ComponentId, almevttbl.TimeStamp,
      almevttbl.Severity, almevttbl.State, almevttbl.Category,
      almevttbl.CauseCode, almevttbl.UnitType, almevttbl.UnitId,
      almevttbl.UnitName, almevttbl.ServerName, almevttbl.StrParam,
      almevttbl.ExtraStrParam, almevttbl.ExtraStrParam2, almevttbl.ExtraStrParam3,
       almevttbl.ParentCustId, almevttbl.ExtraParam1, almevttbl.ExtraParam2,
      almevttbl.ExtraParam3,almevttbl.ExtraParam4,almevttbl.ExtraParam5,
      almevttbl.SRCIPADDRFAMILY,almevttbl.SrcIPAddress11,almevttbl.SrcIPAddress12,
      almevttbl.SrcIPAddress13,almevttbl.SrcIPAddress14,
      almevttbl.DESTIPADDRFAMILY,almevttbl.DestIPAddress11,
      almevttbl.DestIPAddress12,almevttbl.DestIPAddress13,
      almevttbl.DestIPAddress14,  almevttbl.DestPort, almevttbl.SrcPort,
      almevttbl.SessionDir, almevttbl.CustomerId, almevttbl.ProfileId,
      almevttbl.ParentProfileId, almevttbl.CustomerName, almevttbl.AttkDir,
      almevttbl.SubCategory, almevttbl.RiskCategory, almevttbl.AssetValue,
      almevttbl.IPSAction, almevttbl.l4Protocol,almevttbl.ExtraStrParam4 ,
      almevttbl.ExtraStrParam5,almevttbl.username,almevttbl.ExtraStrParam6,
      IpAddrFamily1,IPAddrValue11,IPAddrValue12,IPAddrValue13,IPAddrValue14,
      IpAddrFamily2,IPAddrValue21,IPAddrValue22,IPAddrValue23,IPAddrValue24 FROM 
           AlmEvtTbl PARTITION(ALMEVTTBLP20100323) WHERE AlmEvtId IN ( SELECT  * FROM
      ( SELECT /*+ FIRST_ROWS(1000) INDEX (AlmEvtTbl AlmEvtTbl_Index) */AlmEvtId
      FROM AlmEvtTbl PARTITION(ALMEVTTBLP20100323) where       ((AlmEvtTbl.Customerid
      = 0 or AlmEvtTbl.ParentCustId = 0))  ORDER BY AlmEvtTbl.TIMESTAMP DESC) 
      WHERE ROWNUM  <  602) order by timestamp desc
    call     count       cpu    elapsed       disk      query    current        rows
    Parse        1      0.28       0.26          0          0          0           0
    Execute      1      0.01       0.00          0          0          0           0
    Fetch        0      0.00       0.00          0          0          0           0
    total        2      0.29       0.27          0          0          0           0
    Misses in library cache during parse: 1
    Optimizer mode: FIRST_ROWS
    Parsing user id: 82 
    Elapsed times include waiting on following events:
      Event waited on                             Times   Max. Wait  Total Waited
      ----------------------------------------   Waited  ----------  ------------
      SQL*Net message to client                       1        0.00          0.00
      SQL*Net message from client                     1        0.00          0.00
    OVERALL TOTALS FOR ALL NON-RECURSIVE STATEMENTS
    call     count       cpu    elapsed       disk      query    current        rows
    Parse       13      0.71       0.96          3         10          0           0
    Execute     14      0.20       0.29          4        304         26          21
    Fetch       92   2402.17    2714.85       3819   70033708          0        1255
    total      119   2403.09    2716.10       3826   70034022         26        1276
    Misses in library cache during parse: 10
    Misses in library cache during execute: 6
    Elapsed times include waiting on following events:
      Event waited on                             Times   Max. Wait  Total Waited
      ----------------------------------------   Waited  ----------  ------------
      SQL*Net message to client                      49        0.00          0.00
      SQL*Net message from client                    48       29.88        163.43
      db file sequential read                      1966        0.20         28.10
      latch free                                     21        0.00          0.01
      latch: cache buffers chains                     9        0.01          0.02
      latch: session allocation                       1        0.00          0.00
    OVERALL TOTALS FOR ALL RECURSIVE STATEMENTS
    call     count       cpu    elapsed       disk      query    current        rows
    Parse      940      0.51       0.73          1          2         38           0
    Execute   3263      1.93       2.62          7       1998         43          23
    Fetch     6049      1.32       4.41        214      12858         36       13724
    total    10252      3.78       7.77        222      14858        117       13747
    Misses in library cache during parse: 172
    Misses in library cache during execute: 168
    Elapsed times include waiting on following events:
      Event waited on                             Times   Max. Wait  Total Waited
      ----------------------------------------   Waited  ----------  ------------
      db file sequential read                        88        0.04          0.62
      latch: shared pool                              8        0.00          0.00
      latch: row cache objects                        2        0.00          0.00
      latch free                                      1        0.00          0.00
      latch: session allocation                       1        0.00          0.00
       34  user  SQL statements in session.
    3125  internal SQL statements in session.
    3159  SQL statements in session.
    Trace file: ora11g_ora_2064.trc
    Trace file compatibility: 11.01.00
    Sort options: default
           6  sessions in tracefile.
          98  user  SQL statements in trace file.
        9111  internal SQL statements in trace file.
        3159  SQL statements in trace file.
          89  unique SQL statements in trace file.
       30341  lines in trace file.
        6810  elapsed seconds in trace file.
    ###################################### AutoTrace Output#################  
    Statistics
           3901  recursive calls
              0  db block gets
       39030275  consistent gets
           1970  physical reads
            140  redo size
         148739  bytes sent via SQL*Net to client
            860  bytes received via SQL*Net from client
             42  SQL*Net roundtrips to/from client
             73  sorts (memory)
              0  sorts (disk)
            601  rows processed
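    One hedged observation on the row source statistics above: the inner INDEX FULL SCAN of ALMEVTTBL_INDEX visits 169,965,204 rows before COUNT STOPKEY trims the result to 601, which suggests the hinted index does not deliver rows in TIMESTAMP order, so the whole partition's index is walked and sorted. An index whose leading column matches the ORDER BY would let the FIRST_ROWS plan stop after roughly 601 rows. The index name and exact column list below are assumptions, not taken from the trace:
    CREATE INDEX almevttbl_ts_idx ON almevttbl (timestamp DESC) LOCAL;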

  • Change lock type on large table

    I have a table which is about 2 GB in size. The lock scheme on this table is set to allpages. I think this causes performance issues when select queries are issued: many SH locks are applied. I tried to change the lock scheme to datarows, which, as the documentation indicates, should take no locks on selects.
    But when I tried it with DBArtisan, it took a long time and never finished. I then stopped the app and connected again, and found the lock scheme had indeed changed to datarows, but a new table with a name like mytab_3309ac22 had been created.
    Then I tried to change the lock scheme on another table and got the following message:
    You can not run Alter Table Lock in this database because the 'select into/bulkcopy' option is off.
    It looks like a db option was changed. So I want to know: is the data safe when changing the lock scheme, and how do I ensure it is done properly?

    Hi Kent,
    The problem is that allpages has a different physical structure than datarows.
    Changing from one format to the other requires rewriting the whole table.
    There are a few ways to achieve this goal.
    a) running the alter table directly:
    alter table x lock datarows
    This command makes the change internally. It is the fastest way to do it, but it requires the 'select into' option to be set on the database, and that might be problematic because it breaks the transaction log sequence.
    If you don't have the option set on the database, you are not able to run the command.
    b) doing it manually:
    1. create a second table with the datarows scheme
    2. copy the rows to the new table
    3. drop the old table and rename the new one (a sketch follows below)
    If you have a very busy system with little room for downtime, this would be the only solution. You can create triggers to manually log all the changes on the old table if the process takes too long.
    At the end you would have to add all the foreign keys to the new table, and of course recreate the indexes.
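    A sketch of those three steps (mytab and its columns are placeholders; sp_rename is the standard ASE procedure):
    -- 1. create the second table with the datarows locking scheme
    create table mytab_new (id int not null, payload varchar(255) null) lock datarows
    go
    -- 2. copy the rows across
    insert into mytab_new select id, payload from mytab
    go
    -- 3. drop the old table and rename the new one
    drop table mytab
    go
    exec sp_rename 'mytab_new', 'mytab'
    go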
    I don't know exactly what DBArtisan is doing, but since you don't have the 'select into' option, it is probably running the second option. The new table named mytab_3309ac22 might be the result of the operation breaking off in the middle. How many rows does this table have? And what is its schema?
    If you have plenty of time, I would recommend running the alter table command together with the 'select into' option. Either way, you won't have a progress bar during this operation, so it is very hard to predict how long it will take.
    HTH,
    Adam

  • Best way to change partition key on existing table

    Hi,
    Using Oracle 11.2.0.3 on AIX.
    We have a table of about 800 million rows and 120 GB in size.
    We want to try copies of this table to evaluate different partitioning strategies.
    What is the quickest way to do this?
    We would have liked to, say, Data Pump export the table to disk and Data Pump import the data into a new table, but do the tables need to be of the same format?
    Thanks

    >
    Using Oracle 11.2.0.3 on AIX.
    We have a table of about 800 million rows and 120 GB in size.
    We want to try copies of this table to evaluate different partitioning strategies.
    What is the quickest way to do this?
    We would have liked to, say, Data Pump export the table to disk and Data Pump import the data into a new table, but do the tables need to be of the same format?
    >
    First, your subject asks a different question than the text you posted: best way to change the partition key on an existing table. The answer to that question is YOU CAN'T. All data has to be moved to change the partition key, since each partition/subpartition is in its own segment. You either create a new table or use DBMS_REDEFINITION to redefine the table online.
    Why do you want to export all data to a file first? That just adds to the time and cost of doing the op.
    What problem are you trying to use partitioning to solve? Performance? Data maintenance? For performance the appropriate partitioning key and whether to use subpartitions depends on the types of queries and the query predicates you typically use as well as the columns that may be suitable for partition keys.
    For maintenance a common method is to partition on a date by year/month/day so you can more easily load new daily/weekly/monthly data into its own partition or drop old data that no longer needs to be kept online.
    You should use a small subset of the data when testing your partitioning strategies.
    Can you do the partitioning offline in an outage window? If not, then DBMS_REDEFINITION is your only option.
    Without knowing what you are trying to accomplish, only general advice can be given. You even mentioned that you might want to use a different set of columns than the current table has.
    A standard heap table uses ONE segment for its data (ignoring possible LOB segments). A partitioned/subpartitioned table uses ONE segment for each partition/subpartition. This means that ALL data must be moved to partition the table (unless you are only creating one partition).
    This means that every partitioning scheme that uses a different partition key requires ALL data to be moved again for that test.
    Provide some information about what problem you are trying to solve.
    >
    Is this quicker than datapump?
    >
    Yes - exporting the data simply moves it all an additional time. It is OK to export if you need a backup before you start.
    >
    I found an article which talks about using the merge option on Data Pump import to convert a partitioned table to a non-partitioned table.
    >
    How would that apply to you? That isn't what you said you wanted to do.
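    For the online route, a minimal DBMS_REDEFINITION sketch (schema and table names are placeholders; the interim table BIG_TAB_PART must be created beforehand with the desired partitioning):
    DECLARE
      l_errors PLS_INTEGER;
    BEGIN
      -- check the table qualifies (here: redefinition by primary key)
      DBMS_REDEFINITION.CAN_REDEF_TABLE('SCOTT', 'BIG_TAB',
                                        DBMS_REDEFINITION.CONS_USE_PK);
      DBMS_REDEFINITION.START_REDEF_TABLE('SCOTT', 'BIG_TAB', 'BIG_TAB_PART');
      -- clone indexes, constraints, triggers and grants onto the interim table
      DBMS_REDEFINITION.COPY_TABLE_DEPENDENTS('SCOTT', 'BIG_TAB', 'BIG_TAB_PART',
                                              num_errors => l_errors);
      -- brief lock while the two tables swap identities
      DBMS_REDEFINITION.FINISH_REDEF_TABLE('SCOTT', 'BIG_TAB', 'BIG_TAB_PART');
    END;
    /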

  • Creating a new table, to replace an old one

    Hi experts;
    I'm looking for an advice about what I'm going to do.
    This is the situation;
    I have a production table with more than 1,000,000,000 records.
    The queries against this table are too slow, and I decided to create a new table to replace it.
    But my doubts are:
    How can I create a new table with the same structure as the old one, and with some of the data?
    I got this query:
    Create table new_table as select * from old_table
    where trans_year = '2012';
    I know this gives me the structure, but what about the indexes?
    My other doubt: to replace the old one, do I just drop it and rename the new one?
    This table belongs to an Oracle 9i DB.
    Thanks for your comments.
    Al

    >
    How can I create a new table with the same structure as the old one, and with some of the data?
    I got this query:
    Create table new_table as select * from old_table
    where trans_year = '2012';
    I know this gives me the structure, but what about the indexes?
    My other doubt: to replace the old one, do I just drop it and rename the new one?
    This table belongs to an Oracle 9i DB.
    >
    You could export the table metadata and then import it as a renamed table. That would keep the indexes, triggers, constraints.
    But for one table it may be easier to just recreate the indexes and other objects.
    For the query you may want to disable logging of the INSERT.
    Create table new_table NOLOGGING as select * from old_table where trans_year = '2012';
    -- set the table back to logging mode
    ALTER TABLE new_table LOGGING;
    Then you can drop the old table (if you already have a backup of it) and rename the new one to the old name.
    The create will perform much better with NOLOGGING but because it is not logged you will not be able to recover the data from the log files.
    So you should take a backup afterward.
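    The swap itself is then just the following (the poster's table names; take the backup first, and recreate indexes, grants and constraints afterwards):
    DROP TABLE old_table;
    RENAME new_table TO old_table;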

  • Create a new table using repeating rows

    Hi all,
    My team members are entering data into the shortened table below.
    I want to import this table into my model (a different Excel file), but I need to manipulate the data in order to use it.
    The team members' table (MaterialTypeTbl) looks like:
    For every Material Type (column B) they enter all the suppliers that can work with us. Suppliers are separated by *.
    [Note: the * is a symbol I asked them to use, because supplier names can contain commas (which was my preferred option) and I thought that could create confusion when extracted by query formulas. So basically you can change the * to anything that helps the solution.]
    My ideal new table will look like:
    This repeats each row as many times as there are suppliers in the "supplier" cell.
    i.e.: material 1 (Metal) has 3 suppliers, so in the new table I have 3 rows with only one name in each "supplier" cell.
    The purpose is to use the new table later on for pivots.
    The source table can have more columns, and I need the data about the extracted file (file name, item name, date modified, date created, date accessed).
    I have a folder which contains several files like this (each with one sheet and one table in the same format), and I need to extract them all and consolidate into one table in my Power Pivot model in the desired shape.
    Appreciate any help!
    עמית

    It looks as if anything can go wrong- it will :)
    <gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="bf577f31-b5b2-4e33-8295-b503a9a6b597" id="31c7f26a-d1ac-4302-ab8a-934ec6378fda">let</gs>
        Source = Folder.Files("C:\Users\amendels\Desktop\MI Data Warehouse\Material Type"),
        #"Removed Other Columns" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="d16c857e-92ce-49fd-b50d-234fb6306a78" id="00650cfc-ee61-400b-bce8-93f3d08e806c">.</gs><gs class="GINGER_SOFTWARE_mark"
    ginger_software_uiphraseguid="d16c857e-92ce-49fd-b50d-234fb6306a78" id="05a88f4a-cc9a-412e-839a-a3eee9793fc0">SelectColumns</gs><gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="d16c857e-92ce-49fd-b50d-234fb6306a78"
    id="ec79ede6-2a0b-449d-8733-e8b743d0ca0b">(</gs>Source<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="d16c857e-92ce-49fd-b50d-234fb6306a78" id="833506c2-9e6b-4877-85db-e59e0106a415">,</gs>{"Name",
    "Folder Path"}),
        #"Merged Columns" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="19ec4b4d-02d5-4779-b1d8-70e054a55169">.</gs><gs class="GINGER_SOFTWARE_mark"
    ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="75be0e50-829a-44f8-bce0-c5c650ef25b1">CombineColumns</gs><gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed"
    id="4c6d962d-3323-4a2d-af5d-deb764c514cd">(</gs>#"Removed Other Columns"<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="3946184a-730d-40e8-91ea-fd2d81275202">,</gs>{"Folder
    Path", "Name"}<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="c6268911-7d6e-43c5-8814-bad897f5a05a">,</gs>Combiner<gs class="GINGER_SOFTWARE_mark"
    ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="dccb45ac-42e5-4a27-a150-c7d78d286814">.</gs>CombineTextByDelimiter<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed"
    id="edb72e5c-842a-4887-be04-b12fea521766">(</gs>"", QuoteStyle<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="37c10c24-bce5-4d43-835c-46ed9cf2d5ed" id="0ab10849-2fe6-4841-a97a-84732c546fc2">.</gs>None),"path"),
        #"Added Custom" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="230c9450-a9fa-4694-ba10-df8a0eefbcfa" id="f66eebe2-c524-4cee-91df-e08bd9441bd0">.</gs>AddColumn<gs class="GINGER_SOFTWARE_mark"
    ginger_software_uiphraseguid="230c9450-a9fa-4694-ba10-df8a0eefbcfa" id="2661e215-822d-4461-ae1a-38ddaffdcfc2">(</gs>#"Merged Columns", "Custom", each <gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="230c9450-a9fa-4694-ba10-df8a0eefbcfa"
    id="c471a6f8-de61-44ec-a4eb-4fe5879b86b3">getWorkbook</gs><gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="230c9450-a9fa-4694-ba10-df8a0eefbcfa" id="beb17741-a1bd-45ee-8770-667b6a429b31">(</gs>[path])),
        #"Expand Custom" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="7656e752-6806-471e-aac1-aca2acc34672" id="6ab7b6f5-e01e-4969-8f22-ec937a2e8f68">.</gs>ExpandTableColumn<gs
    class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="7656e752-6806-471e-aac1-aca2acc34672" id="064c2ab1-0322-4b2d-9734-86b2fca4d3dd">(</gs>#"Added Custom", "Custom", {"Column1", "Column2",
    "Column3", "Column4", "Column5", "Column6", "Column7", "Column8", "Column9", "Column10", "Column11", "Column12"}, {"Column1", "Column2",
    "Column3", "Column4", "Column5", "Column6", "Column7", "Column8", "Column9", "Column10", "Column11", "Column12"}),
        #"Removed Columns" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="73ce11cc-0be6-423c-9cd7-996ab0d86c3e" id="875308f8-4505-42bc-bcc4-be136c0d8b99">.</gs><gs class="GINGER_SOFTWARE_mark"
    ginger_software_uiphraseguid="73ce11cc-0be6-423c-9cd7-996ab0d86c3e" id="43a91980-9dd1-40da-84d4-4c2700db63ed">RemoveColumns</gs><gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="73ce11cc-0be6-423c-9cd7-996ab0d86c3e"
    id="d8ecfee3-de63-4dd9-a916-ebdc7ab155f3">(</gs>#"Expand Custom"<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="73ce11cc-0be6-423c-9cd7-996ab0d86c3e" id="70479dcb-a6a3-44ac-a329-476bd6c2f216">,</gs>{"path"}),
        #"Removed Duplicates" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="42c1a334-cff4-432d-8d41-237fbb6cfa2b" id="4efdfd6c-b953-46bd-a673-d9f93ed4fb64">.</gs>Distinct<gs
    class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="42c1a334-cff4-432d-8d41-237fbb6cfa2b" id="4108b4fd-3cbb-44ff-8f51-3e2f51e9b1e5">(</gs>#"Removed Columns", {"Column1"}),
        #"First Row as Header" = Table<gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="e97a3de6-d2de-4872-b8f1-7b26dbacacae" id="9b321e4e-0fbb-40be-9953-5918b7b2c363">.</gs>PromoteHeaders<gs
    class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="e97a3de6-d2de-4872-b8f1-7b26dbacacae" id="c566d7bd-ca67-457e-9698-dcaf9cf7ef90">(</gs>#"Removed Duplicates")
    <gs class="GINGER_SOFTWARE_mark" ginger_software_uiphraseguid="7344bfb8-a967-407a-8229-646035c0264c" id="2719b7a9-2b95-4862-8054-67f5da25353a">in</gs>
        #"First Row as Header"
