Why this query required 2 extra columns in GROUP BY
Hi All,
This is my query:
SELECT "parent_level_id",
"parent_level_name"
"parent_level_id",
"child_level_id",
"child_level_name",
"scenario_id",
"scenario_name",
"exported",
"regress_cob",
"scn_status_id",
"tgt_scenario_name",
"load_status_id",
"load_count",
"PRESENT_VALUE",
"DELTA",
"GAMMA",
"VEGA",
"PNL",
"ORIGINAL_PRESENT_VALUE"
FROM
select 0-fssPV.feed_id as "parent_level_id",
fssPV.feed_description as "parent_level_name",
fssPV.book_id as "child_level_id",
fssPV.book_display_name as "child_level_name",
fssPV.parent_scenario_id as "scenario_id",
fssPV.scenario_display_name as "scenario_name",
nvl(sc.exported, 'N') as "exported",
nvl(fssPV.parent_regressed_cob_date, fssPV.parent_cob_date) as "regress_cob",
fssPV.scn_status_id as "scn_status_id",
fssPV.tgt_scenario_display_name as "tgt_scenario_name",
fssPV.load_status_id as "load_status_id",
count(*) as "load_count",
sum(fssPV.present_value) as "PRESENT_VALUE",
sum(fssPV.delta) as "DELTA",
sum(fssPV.gamma) as "GAMMA",
sum(fssPV.vega) as "VEGA",
sum(fssPV.present_value) - sum (srbase.present_value) as "PNL",
select SUM(vsr.original_present_value)
from validated_position vp , validated_scenario_result vsr
where
VSR.FEED_INSTANCE_ID= fssPV.feed_instance_id
AND VSR.COB_DATE= fssPV.cob_date
and vsr.validated_position_id= vp.validated_position_id
AND vsr.scenario_id in (SELECT distinct mn.node_id as "SCENARIO_ID"
FROM TABLE(pack_scenarios_overview.splitInListIntoLeaves(a_scenario_list, '$', l_cobdate)) nodeIds,
marsnode mn
WHERE nodeIds.column_value = mn.node_id
AND mn.close_date is null
) "ORIGINAL_PRESENT_VALUE"
from (
select fssTgts.*,
mntgt.display_name as "TGT_SCENARIO_DISPLAY_NAME",
mnscenario.display_name as "SCENARIO_DISPLAY_NAME",
mnbook.display_name as "BOOK_DISPLAY_NAME",
fs.feed_description,
sr.present_value,
sr.delta,
sr.gamma,
sr.vega,
p.position_id
from (
/** Treewalk to find Feed/Scenario dependencies **/
Select fssBooks.feed_id,
fssLinks.cob_date,
fssLinks.scenario_id,
fssBooks.load_status_id,
fssBooks.scn_status_id,
fssBooks.tgt_scenario_id,
fssBooks.book_id,
fssBooks.regressed_cob_date,
fssLinks.feed_instance_id,
fssLinks.parent_cob_date,
fssLinks.parent_tgt_scenario_id,
fssLinks.parent_regressed_cob_date,
fssLinks.parent_scenario_id,
fssLinks.parent_feed_instance_id
from (
/** Treewalk (backwards) to find underlying data over FSS control table **/
WITH fss AS
/* Initial filter on feed_scenario_status for performance */
/* cannot filter on scenario as we don't know the dependencies yet */
( select DISTINCT scenario_id,
f.cob_date,
f.scn_status_id,
f.regressed_cob_date,
f.tgt_scenario_id,
f.feed_id,
f.feed_instance_id
from feed_scenario_status f,
feed_group_xref fgx
where cob_date <= l_cobdate -- PVs for this cob must be loaded either today, or its regressed to a previous cob
and f.feed_id = fgx.feed_id
and fgx.feed_group_id like l_feed_group_id
and fgx.feed_id like l_feed_id
select feed_instance_id,
scenario_id,
prior scenario_id,
cob_date,
feed_id,
scn_status_id,
regressed_cob_date,
/* Need to maintain the root values to identify the scenarios that users are seeing */
CONNECT_BY_ISLEAF "ISLEAF",
CONNECT_BY_ROOT scenario_id as "PARENT_SCENARIO_ID",
CONNECT_BY_ROOT feed_id as "PARENT_FEED_ID",
CONNECT_BY_ROOT cob_date as "PARENT_COB_DATE",
CONNECT_BY_ROOT tgt_scenario_id as "PARENT_TGT_SCENARIO_ID",
CONNECT_BY_ROOT regressed_cob_date as "PARENT_REGRESSED_COB_DATE",
CONNECT_BY_ROOT feed_instance_id as "PARENT_FEED_INSTANCE_ID"
from fss
start with fss.cob_date = l_cobdate
connect by (prior fss.regressed_cob_date = fss.cob_date and
prior fss.scenario_id = fss.scenario_id and
prior fss.feed_id = fss.feed_id and
prior fss.tgt_scenario_id is null
) -- Connect if regressed (rule: same feed/book/scenario, different cob)
or (prior fss.tgt_scenario_id = fss.scenario_id and
prior fss.cob_date = fss.cob_date and
prior fss.feed_id = fss.feed_id and
prior fss.feed_instance_id = fss.feed_instance_id
) -- and connect if paste as (rule: same feed/cob/book, different scenario)
) fssLinks,
feed_scenario_status fssBooks
where isLeaf = 1
and fssLinks.parent_feed_id = fssBooks.feed_Id
and fssLinks.parent_cob_date = fssBooks.cob_date
and fssLinks.parent_scenario_id = fssBooks.scenario_id
) fssTgts,
position p,
scenario_result sr,
marsnode mntgt,
marsnode mnbook,
marsnode mnscenario,
feed_static fs
where fssTgts.feed_id = fs.feed_id
and fssTgts.parent_tgt_scenario_id = mntgt.node_id (+)
and mntgt.close_date (+) is null
and fssTgts.feed_instance_id = p.feed_instance_id
and fssTgts.book_id = p.book_id
and fssTgts.cob_date = sr.cob_date
and fssTgts.scenario_id = sr.scenario_id
and p.feed_instance_id = sr.feed_instance_id
and p.position_id = sr.position_id
and p.book_id = mnbook.node_id
and mnbook.close_date is null
and fssTgts.parent_scenario_id = mnscenario.node_id
and mnscenario.close_date is null
) fssPV
left outer join scenario_control sc
on fssPV.parent_feed_instance_id = sc.feed_instance_id
and upper(fssPV.scenario_display_name) = upper(sc.scenario)
/* Join to SBM to calculate P&Ls */
left outer join scenario_base_map sbm
on fssPV.scenario_id = sbm.scenario_id
and sbm.begin_cob_date <= l_cobdate
and sbm.end_cob_date > l_cobdate
left outer join scenario_result srbase
on fssPV.cob_date = srbase.cob_date
and fssPV.feed_instance_id = srbase.feed_instance_id
and nvl(sbm.mapped_scenario_id, l_original_scn_id) = srbase.scenario_id
and fssPV.position_id = srbase.position_id
/* Only display (root) scenarios that users have selected */
where fssPV.parent_scenario_id in (SELECT distinct mn.node_id as "SCENARIO_ID"
FROM TABLE(pack_scenarios_overview.splitInListIntoLeaves(a_scenario_list, '$', l_cobdate)) nodeIds,
marsnode mn
WHERE nodeIds.column_value = mn.node_id
AND mn.close_date is null
group by fssPV.feed_id,
fssPV.feed_description,
fssPV.book_id,
fssPV.book_display_name,
fssPV.parent_scenario_id,
fssPV.scenario_display_name,
sc.exported,
fssPV.parent_regressed_cob_date,
fssPV.scn_status_id,
fssPV.tgt_scenario_display_name,
fssPV.load_status_id,
fssPV.parent_cob_date
-- fssPV.feed_instance_id,
-- fssPV.cob_date
);
In this query, for the initial section where I am selecting the "ORIGINAL_PRESENT_VALUE" part
select SUM(vsr.original_present_value)
from validated_position vp , validated_scenario_result vsr
where
VSR.FEED_INSTANCE_ID= fssPV.feed_instance_id
AND VSR.COB_DATE= fssPV.cob_date
and vsr.validated_position_id= vp.validated_position_id
AND vsr.scenario_id in (SELECT distinct mn.node_id as "SCENARIO_ID"
FROM TABLE(pack_scenarios_overview.splitInListIntoLeaves(a_scenario_list, '$', l_cobdate)) nodeIds,
marsnode mn
WHERE nodeIds.column_value = mn.node_id
AND mn.close_date is null
) "ORIGINAL_PRESENT_VALUE" Even though fssPV.feed_instance_id and fssPV.cob_date are not used anywhere in the SELECT clause, they need to be added to the final GROUP BY clause, else
the query, though it compiles successfully, doesn't execute, giving a "not a GROUP BY" error.
Can anyone tell me why this is the problem and why these columns need to be added to the final GROUP BY clause.
Rgds,
Aashish
Hi, Aashish,
Aashish S. wrote:
Even though fssPV.feed_instance_id and fssPV.cob_date are not used anywhere in the SELECT clause, they need to be added to the final GROUP BY clause, else
the query, though it compiles successfully, doesn't execute, giving a "not a GROUP BY" error.
Actually, you are using both of those columns in the SELECT clause, when you compute original_present_value:
select SUM(vsr.original_present_value)
from validated_position vp , validated_scenario_result vsr
where
VSR.FEED_INSTANCE_ID= fssPV.feed_instance_id
AND VSR.COB_DATE= fssPV.cob_date
and vsr.validated_position_id= vp.validated_position_id
AND vsr.scenario_id in (SELECT distinct mn.node_id as "SCENARIO_ID"
FROM TABLE(pack_scenarios_overview.splitInListIntoLeaves(a_scenario_list, '$', l_cobdate)) nodeIds,
marsnode mn
WHERE nodeIds.column_value = mn.node_id
AND mn.close_date is null
) "ORIGINAL_PRESENT_VALUE"
Since the scalar sub-query is correlated to fsspv.feed_instance_id and fsspv.cob_date, it is dependent on those columns, the same as, say, TRUNC (fsspv.cob_date, 'YEAR') is dependent on fsspv.cob_date.
Perhaps you need to put SUM in the main query, not the scalar sub-query, or (probably faster) replace the scalar sub-query with a join. Without knowing your tables, or the results you want from your data, I can't say more.
Whenever you have a problem, post a little sample data (CREATE TABLE and INSERT statements for all tables), and the results you want from that data.
Simplify the problem as much as possible. In this case, I think you can show what the problem is using just two tables, the ones aliased fsspv and mn.
If you can illustrate your problem using commonly available tables, like those in the scott schema, then you don't have to post any data; just the results you want.
It looks like you're doing something like this:
SELECT job
, COUNT (*) AS cnt
, ( -- Begin scalar sub_query to compute min_loc
SELECT MIN (loc)
FROM scott.dept
WHERE deptno = e.deptno
) min_loc -- End scalar sub_query to compute min_loc
FROM scott.emp e
GROUP BY job
, deptno -- Omitting this causes ORA-00979: not a GROUP BY expression
;when you really should be doing something like this:
SELECT job
, COUNT (*) AS cnt
, MIN ( ( -- Begin scalar sub_query to compute min_loc
SELECT loc
FROM scott.dept
WHERE deptno = e.deptno
) ) min_loc -- End scalar sub_query to compute min_loc
FROM scott.emp e
GROUP BY job
;to get these results:
JOB CNT MIN_LOC
CLERK 4 CHICAGO
SALESMAN 4 CHICAGO
PRESIDENT 1 NEW YORK
MANAGER 3 CHICAGO
ANALYST 2 DALLAS
Similar Messages
-
Why this Query is taking much longer time than expected?
Hi,
I need experts support on the below mentioned issue:
Why this Query is taking much longer time than expected? Sometimes I am getting connection timeout error. Is there any better way to achieve result in shortest time. Below, please find the DDL & DML:
DDL
BHDCollections
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[BHDCollections](
[BHDCollectionid] [bigint] IDENTITY(1,1) NOT NULL,
[GroupMemberid] [int] NOT NULL,
[BHDDate] [datetime] NOT NULL,
[BHDShift] [varchar](10) NULL,
[SlipValue] [decimal](18, 3) NOT NULL,
[ProcessedValue] [decimal](18, 3) NOT NULL,
[BHDRemarks] [varchar](500) NULL,
[Createdby] [varchar](50) NULL,
[Createdon] [datetime] NULL,
CONSTRAINT [PK_BHDCollections] PRIMARY KEY CLUSTERED
[BHDCollectionid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
BHDCollectionsDet
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE TABLE [dbo].[BHDCollectionsDet](
[CollectionDetailid] [bigint] IDENTITY(1,1) NOT NULL,
[BHDCollectionid] [bigint] NOT NULL,
[Currencyid] [int] NOT NULL,
[Denomination] [decimal](18, 3) NOT NULL,
[Quantity] [int] NOT NULL,
CONSTRAINT [PK_BHDCollectionsDet] PRIMARY KEY CLUSTERED
[CollectionDetailid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
Banks
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[Banks](
[Bankid] [int] IDENTITY(1,1) NOT NULL,
[Bankname] [varchar](50) NOT NULL,
[Bankabbr] [varchar](50) NULL,
[BankContact] [varchar](50) NULL,
[BankTel] [varchar](25) NULL,
[BankFax] [varchar](25) NULL,
[BankEmail] [varchar](50) NULL,
[BankActive] [bit] NULL,
[Createdby] [varchar](50) NULL,
[Createdon] [datetime] NULL,
CONSTRAINT [PK_Banks] PRIMARY KEY CLUSTERED
[Bankid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
Groupmembers
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[GroupMembers](
[GroupMemberid] [int] IDENTITY(1,1) NOT NULL,
[Groupid] [int] NOT NULL,
[BAID] [int] NOT NULL,
[Createdby] [varchar](50) NULL,
[Createdon] [datetime] NULL,
CONSTRAINT [PK_GroupMembers] PRIMARY KEY CLUSTERED
[GroupMemberid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
GO
ALTER TABLE [dbo].[GroupMembers] WITH CHECK ADD CONSTRAINT [FK_GroupMembers_BankAccounts] FOREIGN KEY([BAID])
REFERENCES [dbo].[BankAccounts] ([BAID])
GO
ALTER TABLE [dbo].[GroupMembers] CHECK CONSTRAINT [FK_GroupMembers_BankAccounts]
GO
ALTER TABLE [dbo].[GroupMembers] WITH CHECK ADD CONSTRAINT [FK_GroupMembers_Groups] FOREIGN KEY([Groupid])
REFERENCES [dbo].[Groups] ([Groupid])
GO
ALTER TABLE [dbo].[GroupMembers] CHECK CONSTRAINT [FK_GroupMembers_Groups]
BankAccounts
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[BankAccounts](
[BAID] [int] IDENTITY(1,1) NOT NULL,
[CustomerID] [int] NOT NULL,
[Locationid] [varchar](25) NOT NULL,
[Bankid] [int] NOT NULL,
[BankAccountNo] [varchar](50) NOT NULL,
CONSTRAINT [PK_BankAccounts] PRIMARY KEY CLUSTERED
[BAID] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
GO
ALTER TABLE [dbo].[BankAccounts] WITH CHECK ADD CONSTRAINT [FK_BankAccounts_Banks] FOREIGN KEY([Bankid])
REFERENCES [dbo].[Banks] ([Bankid])
GO
ALTER TABLE [dbo].[BankAccounts] CHECK CONSTRAINT [FK_BankAccounts_Banks]
GO
ALTER TABLE [dbo].[BankAccounts] WITH CHECK ADD CONSTRAINT [FK_BankAccounts_Locations1] FOREIGN KEY([Locationid])
REFERENCES [dbo].[Locations] ([Locationid])
GO
ALTER TABLE [dbo].[BankAccounts] CHECK CONSTRAINT [FK_BankAccounts_Locations1]
Currency
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[Currency](
[Currencyid] [int] IDENTITY(1,1) NOT NULL,
[CurrencyISOCode] [varchar](20) NOT NULL,
[CurrencyCountry] [varchar](50) NULL,
[Currency] [varchar](50) NULL,
CONSTRAINT [PK_Currency] PRIMARY KEY CLUSTERED
[Currencyid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
CurrencyDetails
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
SET ANSI_PADDING ON
GO
CREATE TABLE [dbo].[CurrencyDetails](
[CurDenid] [int] IDENTITY(1,1) NOT NULL,
[Currencyid] [int] NOT NULL,
[Denomination] [decimal](15, 3) NOT NULL,
[DenominationType] [varchar](25) NOT NULL,
CONSTRAINT [PK_CurrencyDetails] PRIMARY KEY CLUSTERED
[CurDenid] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
SET ANSI_PADDING OFF
QUERY
WITH TEMP_TABLE AS
SELECT 0 AS COINS, BHDCollectionsDet.Quantity AS BN, BHDCollections.BHDDate AS CollectionDate, BHDCollectionsDet.Currencyid,
(BHDCollections.BHDCollectionid) AS DSLIPS, Banks.Bankname
FROM BHDCollections INNER JOIN
BHDCollectionsDet ON BHDCollections.BHDCollectionid = BHDCollectionsDet.BHDCollectionid INNER JOIN
GroupMembers ON BHDCollections.GroupMemberid = GroupMembers.GroupMemberid INNER JOIN
BankAccounts ON GroupMembers.BAID = BankAccounts.BAID INNER JOIN
Currency ON BHDCollectionsDet.Currencyid = Currency.Currencyid INNER JOIN
CurrencyDetails ON Currency.Currencyid = CurrencyDetails.Currencyid INNER JOIN
Banks ON BankAccounts.Bankid = Banks.Bankid
GROUP BY BHDCollectionsDet.Quantity, BHDCollections.BHDDate, BankAccounts.Bankid, BHDCollectionsDet.Currencyid, CurrencyDetails.DenominationType,
CurrencyDetails.Denomination, BHDCollectionsDet.Denomination, Banks.Bankname,BHDCollections.BHDCollectionid
HAVING (BHDCollections.BHDDate BETWEEN @FromDate AND @ToDate) AND (BankAccounts.Bankid = @Bankid) AND (CurrencyDetails.DenominationType = 'Currency') AND
(CurrencyDetails.Denomination = BHDCollectionsDet.Denomination)
UNION ALL
SELECT BHDCollectionsDet.Quantity AS COINS, 0 AS BN, BHDCollections.BHDDate AS CollectionDate, BHDCollectionsDet.Currencyid,
(BHDCollections.BHDCollectionid) AS DSLIPS, Banks.Bankname
FROM BHDCollections INNER JOIN
BHDCollectionsDet ON BHDCollections.BHDCollectionid = BHDCollectionsDet.BHDCollectionid INNER JOIN
GroupMembers ON BHDCollections.GroupMemberid = GroupMembers.GroupMemberid INNER JOIN
BankAccounts ON GroupMembers.BAID = BankAccounts.BAID INNER JOIN
Currency ON BHDCollectionsDet.Currencyid = Currency.Currencyid INNER JOIN
CurrencyDetails ON Currency.Currencyid = CurrencyDetails.Currencyid INNER JOIN
Banks ON BankAccounts.Bankid = Banks.Bankid
GROUP BY BHDCollectionsDet.Quantity, BHDCollections.BHDDate, BankAccounts.Bankid, BHDCollectionsDet.Currencyid, CurrencyDetails.DenominationType,
CurrencyDetails.Denomination, BHDCollectionsDet.Denomination, Banks.Bankname,BHDCollections.BHDCollectionid
HAVING (BHDCollections.BHDDate BETWEEN @FromDate AND @ToDate) AND (BankAccounts.Bankid = @Bankid) AND (CurrencyDetails.DenominationType = 'COIN') AND
(CurrencyDetails.Denomination = BHDCollectionsDet.Denomination)),
TEMP_TABLE2 AS
SELECT CollectionDate,Bankname,DSLIPS AS DSLIPS,SUM(BN) AS BN,SUM(COINS)AS COINS FROM TEMP_TABLE Group By CollectionDate,DSLIPS,Bankname
SELECT CollectionDate,Bankname,count(DSLIPS) AS DSLIPS,sum(BN) AS BN,sum(COINS) AS coins FROM TEMP_TABLE2 Group By CollectionDate,Bankname
HAVING COUNT(DSLIPS)<>0;
Without seeing an execution plan of the query it is hard to suggest something useful. Try inserting the result of the UNION ALL into a temporary table and then perform the aggregation on that table, not a CTE.
Just
SELECT CollectionDate,Bankname,DSLIPS AS DSLIPS,SUM(BN) AS BN,SUM(COINS)AS COINS FROM
#tmp Group By CollectionDate,DSLIPS,Bankname
HAVING COUNT(DSLIPS)<>0;
Best Regards,Uri Dimant SQL Server MVP,
http://sqlblog.com/blogs/uri_dimant/
MS SQL optimization: MS SQL Development and Optimization
MS SQL Consulting:
Large scale of database and data cleansing
Remote DBA Services:
Improves MS SQL Database Performance
SQL Server Integration Services:
Business Intelligence -
Quicktime error (this application requires an Extra (Quicktime 3)
I have just updated to the recent version of Director and am in the process of updating my previous files for teaching. Everything seemed to work in the update process and I published a new Projector for my Mac running the latest version of Snow Leopard. However, although the tutorial I created runs correctly when I run it directly from Director's main program, when I run it from the projector I created I get this error:
Director Player error
This application requires an Extra (Quicktime 3) that either does not exist or failed to initialize properly. Please make sure
the appropriate extras are in the Extras folder(s).
Before I updated I didn't have an Extras folder. Can someone please advise, simply.
Mickey
QuickTime 3 is pretty ancient, and Director 11 does not use it. You can probably get away with simply removing the xtra reference in Modify, Movie, Xtras.... Find the Quicktime 3 xtra and remove it. Director 11 comes with a Quicktime 6 asset which is probably going to be added to your project automatically, so that should be all you need to do. If your code does something that is specific to Quicktime 3, then you may need to make adjustments to your code. You also may need to remove and re-import the QuickTime videos if they fail to play.
-
Why this query does not show the result?
Why the query with the schema prefixed does not show the result and the query without schema display the correct results?
SQL> select data_object_id,object_type from dba_objects where object_name='HR'.'JOBS';
select data_object_id,object_type from dba_objects where object_name='HR'.'JOBS'
ERROR at line 1:
ORA-00933: SQL command not properly ended
SQL> select data_object_id,object_type from dba_objects where object_name='HR.JOBS';
no rows selected
SQL> select data_object_id, OWNER, object_type from dba_objects where object_name='JOBS';
DATA_OBJECT_ID OWNER OBJECT_TYPE
69662 HR TABLE
OE SYNONYM
SQL> SELECT USER FROM DUAL;
USER
SYS
Hi,
the column object_name refers to a object_name which is 'JOBS', the column owner refers to the owner 'HR', the value isn't stored together, so you have to select the two columns. It is the same behaviour as every other table/view. Have a look at the values in the view DBA_OBJECTS.
Herald ten Dam
Superconsult.nl -
Silly old fogey (me) cannot figure out why this query returns 1 row
Hi all,
In reference to {thread:id=2456973}, why does
select sum(count(decode(job, 'CLERK', 1, null))) CLERKS
, sum(count(decode(job, 'SALESMAN', 1, null))) SALESMANS
from emp group by job;only return 1 row and not 1 for each job? I actually had to test it myself to believe it.
It returns data as if the query were
select sum(CLERKS), sum(SALESMANS)
from (select count(decode(job, 'CLERK', 1, null)) CLERKS, count(decode(job, 'SALESMAN', 1, null)) SALESMANS
from emp group by job)Using only a single aggregate (either count or sum) returns 1 row per job, as expectedJohn Stegeman wrote:
It returns data as if the query were
select sum(CLERKS), sum(SALESMANS)
from (select count(decode(job, 'CLERK', 1, null)) CLERKS, count(decode(job, 'SALESMAN', 1, null)) SALESMANS
from emp group by job)
Exactly the point ;-)
Seems like Oracle actually can do a "double group by" in the same operation.
Witness the explain plans in this example:
SQL> select count(decode(job, 'CLERK', 1, null)) CLERKS
2 , count(decode(job, 'SALESMAN', 1, null)) SALESMANS
3 from scott.emp group by job;
CLERKS SALESMANS
0 0
0 0
0 0
0 4
4 0
Execution Plan
Plan hash value: 1697595674
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
| 0 | SELECT STATEMENT | | 5 | 40 | 4 (25)| 00:00:01 |
| 1 | HASH GROUP BY | | 5 | 40 | 4 (25)| 00:00:01 |
| 2 | TABLE ACCESS FULL| EMP | 14 | 112 | 3 (0)| 00:00:01 |
---------------------------------------------------------------------------And compare it to this one with the double aggregates:
SQL> select sum(count(decode(job, 'CLERK', 1, null))) CLERKS
2 , sum(count(decode(job, 'SALESMAN', 1, null))) SALESMANS
3 from scott.emp group by job;
CLERKS SALESMANS
4 4
Execution Plan
Plan hash value: 417468012
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
| 0 | SELECT STATEMENT | | 1 | 8 | 4 (25)| 00:00:01 |
| 1 | SORT AGGREGATE | | 1 | 8 | 4 (25)| 00:00:01 |
| 2 | HASH GROUP BY | | 1 | 8 | 4 (25)| 00:00:01 |
| 3 | TABLE ACCESS FULL| EMP | 14 | 112 | 3 (0)| 00:00:01 |
----------------------------------------------------------------------------There is both HASH GROUP BY and SORT AGGREGATE.
It does not really make sense to do an aggregate on an aggregate - if both aggregates are used "on the same group-by level".
The sum() aggregates are used upon an already aggregated value, so it does look like Oracle actually treats that as "first do the inner aggregate using the specified group by and then do the outer aggregate on the result with no group by."
Look at this example where I combine "double" aggregates with "single" aggregates:
SQL> select sum(count(decode(job, 'CLERK', 1, null))) CLERKS
2 , sum(count(decode(job, 'SALESMAN', 1, null))) SALESMANS
3 , count(decode(job, 'SALESMAN', 1, null)) SALESMANS2
4 , count(*) COUNTS
5 from scott.emp group by job;
CLERKS SALESMANS SALESMANS2 COUNTS
4 4 1 5
Execution Plan
Plan hash value: 417468012
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
| 0 | SELECT STATEMENT | | 1 | 8 | 4 (25)| 00:00:01 |
| 1 | SORT AGGREGATE | | 1 | 8 | 4 (25)| 00:00:01 |
| 2 | HASH GROUP BY | | 1 | 8 | 4 (25)| 00:00:01 |
| 3 | TABLE ACCESS FULL| EMP | 14 | 112 | 3 (0)| 00:00:01 |
----------------------------------------------------------------------------When mixing "double" and "single" aggregates, Oracle decides that single aggregates belong in the "outer" aggregation.
SALESMAN2 is doing a count on the aggregated job column that is the result of the "inner" group by - therefore only 1.
The count(*) also counts the result of the "inner" aggregation.
I am not sure if this is documented or if it is a "sideeffect" of either the internal code used for GROUPING SETS or the internal code used for allowing analytic functions like this:
SQL> select count(decode(job, 'CLERK', 1, null)) CLERKS
2 , count(decode(job, 'SALESMAN', 1, null)) SALESMANS
3 , sum(count(decode(job, 'CLERK', 1, null))) over () CLERKS2
4 , sum(count(decode(job, 'SALESMAN', 1, null))) over () SALESMANS2
5 from scott.emp group by job;
CLERKS SALESMANS CLERKS2 SALESMANS2
0 0 4 4
4 0 4 4
0 0 4 4
0 0 4 4
0 4 4 4
Execution Plan
Plan hash value: 4115955660
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
| 0 | SELECT STATEMENT | | 5 | 40 | 4 (25)| 00:00:01 |
| 1 | WINDOW BUFFER | | 5 | 40 | 4 (25)| 00:00:01 |
| 2 | SORT GROUP BY | | 5 | 40 | 4 (25)| 00:00:01 |
| 3 | TABLE ACCESS FULL| EMP | 14 | 112 | 3 (0)| 00:00:01 |
----------------------------------------------------------------------------Personally I think I would have preferred if Oracle raised an error on this "double aggregation" and thus require me to write it this way (if that is the result I desired):
select sum(CLERKS), sum(SALESMANS)
from (select count(decode(job, 'CLERK', 1, null)) CLERKS, count(decode(job, 'SALESMAN', 1, null)) SALESMANS
from emp group by job)I can not really think of good use-cases for the "double aggregation" - but rather that it could give you unnoticed bugs in your code if you happen to do double aggregation without noticing it.
Interesting thing to know ;-) -
Can anyone solve this query requirement
Would like to know if anyone solved this situation in the query before. If yes, or If you have any ideas, could you please share it with me.
Below is the scenario.
Cube data: We have a number of 'FACILIIES'. 'Surveys' are conducted for each facility once in every six to 18 months. No fixed time intervals though. And surveys are numbered sequencially, always in the increasing order with respect to time. each survey has a 'survey date'. and a keyfigure 'Count'.
DATA IN THE CUBE AS OF 4/30/2005
FACILITY...SURVEYID...SURVEYDATE...COUNTKEYFIGURE
525... 121... 1/6/2004... 6
624... 132... 2/20/2004... 7
525... 138... 10/1/2004... 5
624... 140... 9/15/2004... 4
525... 157... 3/10/2005... 8
624... 245... 4/15/2005... 6
If the query is run for the above data, is shouls be displayed like this.
REPORT AS OF 04/30/2005
FACILITY...LATESTSURVEY...LATESTCOUNT...PREVIOUSSURVEY PRECOUNT
525... 157... 8... 138... 5
624... 245... 6... 140... 4
Once the data is updated further, this is the data in the Cube as of 10/30/2005
DATA IN THE CUBE AS OF 10/30/2005
FACILITY...SURVEYID...SURVEYDATE...COUNTKEYFIGURE
525... 121... 1/6/2004... 6
624... 132... 2/20/2004... 7
525... 138... 10/1/2004... 5
624... 140... 9/15/2004... 4
525... 157... 3/10/2005... 8
624... 245... 4/15/2005... 6
525... 290... 8/20/2005... 9
624... 312... 10/15/2005... 4
REPORT AS OF 04/30/2005
FACILITY LATESTSUREY LATESTCOUNT PREVIOUSSURVEY PRECOUNT
525... 290... 9... 157... 8
624... 312... 4... 245... 6
Dynamically, the latest survey and previous survey has to be determined. Any ideas on how to solve... We alrady thought of making changes to the Survey Master data. Any thing that can be done in the query itself?
thanks
Gova
(I could not improve the display format, so I used '...' to separate the fields. may be SDN should look into improving the display format)Hi Gova..
We too had a similar requirement..to get the previous records..
We had to end up having to populate the Previous record in a seperate field on the same line..
I think you are on the right path to modify the master data and have the previous survey and previous count populated on every line..
Good Luck
Ashish.. -
Why this query is giving error in report
I have a simple query ...
SELECT &P_FLEXDATA C_FLEXDATA,
CHART_OF_ACCOUNTS_ID C_NUM
FROM GL_CODE_COMBINATIONS
It is running fine in SQL prompt but when I create it as a report query then it is giving error: ORA-00904 "C_FLEXDATA": invalid identifier
Please advise ....
I shall be ever thankful...
Best RegardsHello,
You are using a "lexical" reference in the SQL query (&P_FLEXDATA).
You must set a default value for P_FLEXDATA in order the SQL Query to be parsed successfuly
Regards -
Why does Mail require "Re:" subject prefix to group conversations properly?
When the Apple Mail program (using Mavericks 10.9.1) is configured to "View:Organize by Conversation", it is necessary for the email subject line to be prefixed with "Re:" in order for the email from different senders to be grouped into the same conversation, even if the emails contain proper "In-Reply-To" header fields. Why does the program require this subject line prefix, versus just using the In-Reply-To field values to group conversations? Many automated systems that generate emails (e.g., web-based helpdesk systems) will send email with the proper In-Reply-To field values to associate emails for the same topic/case, but they do not use the "Re:" subject line prefix. This seems to be a defect with the design of the Mail program to require this prefix for the conversation grouping to work properly.
Does anyone know if there is legitimate rationale for this behavior? Does anyone know if there is a work-around for this problem (short of changing these other systems to prefix the subject line)?
Thanks, SteveHi
You asked about a recommendation for an app to paste a group of addresses into the To, Cc or Bcc fields of an email?
Our "MailShot Pro" app is the only one we know of on the App Store that creates special "group contacts" in your address book which contain all your groups email addresses, but can be used just like regular contacts from most of your favourite apps. We now have nearly 50000 people using it, and growing steadily every day.
Here is a link if you would like to know more “MailShot Pro” (itunes link).
A free version is also available if you'd like to try it out with a small number of contacts.
If you need any customer support just contact us at the website, we're always happy to help.
Peter
www.solubleapps.com
Disclosure: I am the developer of this app and may benefit from its sale- (but so might you) -
Please find this query very urgent
Query-->select value from mytable where value is not null
union all
select value from mytable where value is null
Ex: table name::mytable
In this table i have one column called 'value' and this column values shold be like this
12
null
13
null
11
null
....i executed above query the result will be like this
12
13
11
null
null
null
the above query executes on the table it's effect only one column result
but i need multiple columns to be affected, same as the above one only.... Anyone please help me on it and give the query.
I have a table.. MyTable(value number(10))
select * from Mytable;value
12
null
13
null
11
null
This is the table i have and the records i have...... and i need output like this
value
12
13
11
null
null
null ..............>ike this i need output for this i use this query i.e
Query-->select value from mytable where value is not null
union all
select value from mytable where value is null
------------>and this query gives only one column result i need multiple results like below: i have table with 2 columns like below
MyTable2(value1 number(10),value2 number(10))
select * fromMyTable2value1 value2
111 null
null 201
112 null
null 200
110 null
this is the table i have......... and i need output like below
value1 value2
111 201
112 200
110 null
null null
null null
NOTE:: If we use ORDER BY table records order must change, i don't want to change any order....please what is the query for that -
Why is this query not using the index?
check out this query:-
SELECT CUST_PO_NUMBER, HEADER_ID, ORDER_TYPE, PO_DATE
FROM TABLE1
WHERE STATUS = 'N'
and here's the explain plan:-
1
2 -------------------------------------------------------------------------------------
3 | Id | Operation | Name | Rows | Bytes | Cost (%CPU)|
4 -------------------------------------------------------------------------------------
5 | 0 | SELECT STATEMENT | | 2735K| 140M| 81036 (2)|
6 |* 1 | TABLE ACCESS FULL| TABLE1 | 2735K| 140M| 81036 (2)|
7 -------------------------------------------------------------------------------------
8
9 Predicate Information (identified by operation id):
10 ---------------------------------------------------
11
12 1 - filter("STATUS"='N')
There is already an index on this column, as is shown below:-
INDEX_NAME INDEX_TYPE UNIQUENESS TABLE_NAME COLUMN_NAME COLUMN_POSITION
1 TABLE1_IDX2 NORMAL NONUNIQUE TABLE1 STATUS 1
2 TABLE1_IDX NORMAL NONUNIQUE TABLE1 HEADER_ID 1
So why is this query not using the index on the 'STATUS' Column?
I've already tried using optimizer hints and regathering the stats on the table, but the execution plan still remains the same, i.e. it still uses a FTS.
I have tried this command also:-
exec dbms_stats.gather_table_stats('GECS','GEPS_CS_SALES_ORDER_HEADER',method_opt=>'for all indexed columns size auto',cascade=>true,degree=>4);
inspite of this, the query is still using a full table scan.
The table has around 55 Lakh records, across 60 columns. And because of the FTS, the query is taking a long time to execute. How do i make it use the index?
Please help.
Edited by: user10047779 on Mar 16, 2010 6:55 AM
If the cardinality is really as skewed as that, you may want to look at putting a histogram on the column (sounds like it would be in order, and that you don't have one).
create table skewed_a_lot
as
select
case when mod(level, 1000) = 0 then 'N' else 'Y' end as Flag,
level as col1
from dual connect by level <= 1000000;
create index skewed_a_lot_i01 on skewed_a_lot (flag);
exec dbms_stats.gather_table_stats(user, 'SKEWED_A_LOT', cascade => true, method_opt => 'for all indexed columns size auto');
...is an example. -
I found a bug in Power Query. Anyone know why this occurs?
It took some time to isolate the cause of this.
One of our SharePoint sites couldn't successfully be connected to with Power Query for Excel 2013. We would receive this message: "DataSource.Error: OData: The feed's metadata document appears to be invalid."
I made a site template and started making copies of the failing site in order to test the cause of the error.
I narrowed it down to 16 lists that, if present on the site, would cause the connection to fail. Those lists' names were simply numbers. For example: "1", "2", etc.
I noticed in the Listdata.svc feed for the site that only the choice columns were included in the metadata and not the single line of text column names. In Listdata.svc, it appends a "c_" in front of the both the list and choice column
names if the list name starts with a number.
After many tests I believe I have confirmed that you cannot start the name of a list with a number IF you have a choice column in that list. Does anyone know why this is?
Hi,
Thanks for reporting this issue. Would you be able to capture some Fiddler traces and send them our way so we can take a closer look at the underlying errors?
Feel free to send them via "Send a Frown" in PQ for privacy.
Thanks,
M. -
How Can i add "DateDiff(day, T0.DueDate" as a column in this query?
How Can i add "DateDiff(day, T0.DueDate" as a column in this query?
SELECT T1.CardCode, T1.CardName, T1.CreditLine, T0.RefDate, T0.Ref1 'Document Number',
CASE WHEN T0.TransType=13 THEN 'Invoice'
WHEN T0.TransType=14 THEN 'Credit Note'
WHEN T0.TransType=30 THEN 'Journal'
WHEN T0.TransType=24 THEN 'Receipt'
END AS 'Document Type',
T0.DueDate, (T0.Debit- T0.Credit) 'Balance'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')<=-1),0) 'Future'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')>=0 and DateDiff(day, T0.DueDate,'[%1]')<=30),0) 'Current'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')>30 and DateDiff(day, T0.DueDate,'[%1]')<=60),0) '31-60 Days'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')>60 and DateDiff(day, T0.DueDate,'[%1]')<=90),0) '61-90 Days'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')>90 and DateDiff(day, T0.DueDate,'[%1]')<=120),0) '91-120 Days'
,ISNULL((SELECT T0.Debit-T0.Credit WHERE DateDiff(day, T0.DueDate,'[%1]')>=121),0) '121+ Days'
FROM JDT1 T0 INNER JOIN OCRD T1 ON T0.ShortName = T1.CardCode
WHERE (T0.MthDate IS NULL OR T0.MthDate > [%1]) AND T0.RefDate <= [%1] AND T1.CardType = 'C'
ORDER BY T1.CardCode, T0.DueDate, T0.Ref1
Hi,
As you mentioned not possible to assign the dynamic column in the query.
will give you example for generate a dynamic column name in SQL query, using this example you can achieve your requirement.
DECLARE @cols AS NVARCHAR(MAX),
@query AS NVARCHAR(MAX)
select @cols = STUFF((SELECT distinct ',' + QUOTENAME(C.Name)
from [History]
FOR XML PATH(''), TYPE
).value('.', 'NVARCHAR(MAX)')
,1,1,'')
set @query = 'SELECT [Date],' + @cols +'
from
select [Date], Name, Value
from [History]
) x
pivot
max(value)
for Name in (' + @cols + ')
) p '
execute(@query) -
Why would this query return three rows?
select pc.tour_id,
pcp.project_id,
pc.contr_num,
pc.current_contr_status,
pc.current_contr_substat
from p_contract_lead pcl
left join p_contract pc on pc.contr_num = '2385108'--pcl.contr_num
left join p_contract_purchase pcp on pcp.contr_num ='2385108'-- pc.contr_num
where pcl.lead_id = '179772978'
in this query it returns me 3 rows,
in p_contract there is only 1 row with contr_num = '2385108'
and p_contract_purchase also has 1 row with contr_num = '2385108'
in p_contract_lead the lead_id = '179772978' has 3 records but they dont have contr_num=2385108
I'm just wondering why it is giving me these results.
Shouldn't it give me zero results, since pcl.lead_id = '179772978' does not correspond to any row with contr_num = '2385108'?
I have version 12.0.0.6; maybe it is that I don't understand how the left join works, or it could be something else?
Sorry for not creating an example — I really don't know how.
thanks for your help
Hi,
FROM l
LEFT OUTER JOIN r ON ...
means include all rows from l whether or not they have a match in r.
If you have 3 rows in l, then there will always be at least 3 rows in the result set, regardless of what's in r (if anything).
natpidgeon wrote:
in p_contract there is only 1 row with contr_num = '2385108'
and p_contract_purchase also has 1 row with contr_num = '2385108'
in p_contract_lead the lead_id = '179772978' has 3 records but they dont have contr_num=2385108
im just wondering why is giving me these results?
shouldnt it give me zero results since the pcl.lead_id = '179772978' does not equal contr_num=2385108? ...
What you just described is basically how an inner join works.
In this case, it doesn't matter in the least whether pcl.lead_id equals anything in pc or not. The join condition between pcl and pc is
pc.contr_num = '2385108'
That is, a row from pcl will match a row in pc if (and only if) pc.contr_num='2385108'. The join condition makes no mention of pcl.lead_id (or any other column in pcl, for that matter), so what's in pcl.lead_id (or any other column of pcl) doesn't play any role in deciding if rows match.
sorry for not creating a example , because i really dont know how.
Post CREATE TABLE statements for all 3 tables. You only need to include the columns that are used in this query.
Post 1 INSERT statement for p_contract, with contr_num = '2385108'. It wouldn't hurt to post an additional row that you know shouldn't appear in the results.
Post 1 INSERT statement for p_contract_purchase, also with contr_num = '2385108'. Again, it wouldn't hurt to post a 2nd row that does not meet the join condition.
Post 3 INSERT statements p_contract_lead with lead_id = '179772978' , but NOT contr_num='2385108'. Again, having another row with a different lead_id would make a better example. -
Why doesn't this query fail??
Hi All,
A little brainstorming ....
I am providing the sql here..
Connected to Oracle Database 11g Enterprise Edition Release 11.2.0.1.0
Connected as dss
SQL>
SQL> UPDATE dw_mmg_cat_us
2 SET dw_eftv_to = TRUNC(sysdate)
3 WHERE (mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id) IN
4 (SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
5 FROM dw_mmg_cat_us
6 WHERE dw_eftv_to is null
7 AND mcv_mc_mc_id = 'DUMP_NO'
8 MINUS
9 SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
10 FROM merc_mmg_catus
11 WHERE mcv_mc_mc_id = 'DUMP_NO');
0 rows updated
SQL>
SQL> SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
2 FROM dw_mmg_cat_us
3 WHERE dw_eftv_to is null
4 AND mcv_mc_mc_id = 'DUMP_NO'
5 MINUS
6 SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
7 FROM merc_mmg_catus
8 WHERE mcv_mc_mc_id = 'DUMP_NO'
9 /
SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
FROM dw_mmg_cat_us
WHERE dw_eftv_to is null
AND mcv_mc_mc_id = 'DUMP_NO'
MINUS
SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
FROM merc_mmg_catus
WHERE mcv_mc_mc_id = 'DUMP_NO'
ORA-00904: "MCV_MC_MC_ID": invalid identifier
SQL> desc dw_mmg_cat_us
Name Type Nullable Default Comments
MCV_MC_MC_ID VARCHAR2(10) Merchandise category
MCV_MCV_ID VARCHAR2(10) The value for the category
MMG_MMGL_ID NUMBER(3) The unique identifier of the MMGL
MMG_MMG_ID VARCHAR2(10) The identifier of the MMG, unique within the significant level
DW_EFTV_FROM DATE Date the relationship was effective from No direct mapping from
DW_EFTV_TO DATE Y Date the relationship is effective to No direct mapping from exi
SQL> select * from merc_mmg_catus where rownum < 5;
MMG_ART_MMG_ID MC_MC_ID MCV_MCV_ID
12 CLASSTYPE O
13 CLASSTYPE O
14 CLASSTYPE O
15 CLASSTYPE O
SQL>
SQL> SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
2 FROM merc_mmg_catus
3 WHERE mcv_mc_mc_id = 'DUMP_NO'
4 /
SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
FROM merc_mmg_catus
WHERE mcv_mc_mc_id = 'DUMP_NO'
ORA-00904: "MCV_MC_MC_ID": invalid identifier
Here merc_mmg_catus is a synonym, not a table, and this object does not have the mcv_mc_mc_id and mmg_mmg_id
columns — so why is the above UPDATE statement not failing? If I run the same SELECT separately, it throws the error shown above; why is that error not thrown inside the UPDATE statement?
Thanks,
Sid>
SQL> UPDATE dw_mmg_cat_us
2 SET dw_eftv_to = TRUNC(sysdate)
3 WHERE (mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id) IN
4 (SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
5 FROM dw_mmg_cat_us
6 WHERE dw_eftv_to is null
7 AND mcv_mc_mc_id = 'DUMP_NO'
8 MINUS
9 SELECT mcv_mc_mc_id,mcv_mcv_id,mmg_mmg_id
10 FROM merc_mmg_catus
11 WHERE mcv_mc_mc_id = 'DUMP_NO');
When resolving names that have no prefix, Oracle has to figure out what you mean.
The column names in line 9 have no prefix, so Oracle will try to resolve them.
It first checks whether the columns come from the table(s) in that branch's FROM clause (line 10); since that is not the case, it then resolves them against the table being updated (line 1), where those column names do exist.
So I think it has resolved the names to be like this:
SQL> UPDATE dw_mmg_cat_us d3
2 SET d3.dw_eftv_to = TRUNC(sysdate)
3 WHERE (d3.mcv_mc_mc_id,d3.mcv_mcv_id,d3.mmg_mmg_id) IN
4 (SELECT d1.mcv_mc_mc_id,d1.mcv_mcv_id,d1.mmg_mmg_id
5 FROM dw_mmg_cat_us d1
6 WHERE d1.dw_eftv_to is null
7 AND d1.mcv_mc_mc_id = 'DUMP_NO'
8 MINUS
9 SELECT d3.mcv_mc_mc_id,d3.mcv_mcv_id,d3.mmg_mmg_id
10 FROM merc_mmg_catus d2
11 WHERE d2.mcv_mc_mc_id = 'DUMP_NO');
Which doesn't make much sense, but is syntactically valid.
If you make a habit of prefixing column names, you would probably have written something like:
SQL> UPDATE dw_mmg_cat_us d3
2 SET d3.dw_eftv_to = TRUNC(sysdate)
3 WHERE (d3.mcv_mc_mc_id,d3.mcv_mcv_id,d3.mmg_mmg_id) IN
4 (SELECT d1.mcv_mc_mc_id,d1.mcv_mcv_id,d1.mmg_mmg_id
5 FROM dw_mmg_cat_us d1
6 WHERE d1.dw_eftv_to is null
7 AND d1.mcv_mc_mc_id = 'DUMP_NO'
8 MINUS
9 SELECT d2.mcv_mc_mc_id,d2.mcv_mcv_id,d2.mmg_mmg_id
10 FROM merc_mmg_catus d2
11 WHERE d2.mcv_mc_mc_id = 'DUMP_NO');
Which would have failed, because Oracle could see that d2 does not have those columns. -
Please show this query, data not showing why?
I created a report and wrote the following query. It was working well for the last 4 months, but today it suddenly stopped showing data and I can't understand why,
because I didn't make any changes to this query.
Please help me
Urgent
SELECT ALL MERCH_ORDER.ORDERNO, MERCH_ORDER.ORDERDATE, MERCH_ORDER.SHIP_DATE, MERCH_ORDER.PONO,
MERCH_ORDER.SUBPP, MERCH_ORDER.PJNO, BUYER.B_NAME, BUYER.B_AJENT,
MERCH_ORDER.ITEM, MERCH_ORDER.FABRIC, MERCH_ORDER.QUALITY, MERCH_ORDER.COMPOSITION,
MERCH_ORDER.P_SIZE, MERCH_ORDER.QUANTITY, MERCH_ORDER.Q_UNIT,
MERCH_ORDER.NETWHT, MERCH_ORDER.WT_UNIT, MERCH_ORDER.TERM, MERCH_ORDER.COMM,
MERCH_ORDER.PRICE, MERCH_ORDER.CUR_SYMB, MERCH_ORDER.STATUS, MERCH_ORDER.REMARKS,
MERCH_ORDER.WONO, MERCH_ORDER.PRONO, MERCH_ORDER.PES_QUANTITY,
MERCH_ORDER.PES_Q_UNIT, MERCH_ORDER.PES_PRICE, MERCH_ORDER.PES_CUR_SYMB
FROM BUYER, MERCH_ORDER
WHERE MERCH_ORDER.CANCEL IS NULL
AND (MERCH_ORDER.B_CODE = BUYER.B_CODE)
and merch_order.orderno not in
(select export_order1.orderno from export_order1)
ORDER BY MERCH_ORDER.ORDERNO
There is no error, and the message header and footer print.
Maybe the query in the "NOT IN" clause (select export_order1.orderno from export_order1)
returns the same rows as the following portion returns...
SELECT ALL MERCH_ORDER.ORDERNO, MERCH_ORDER.ORDERDATE, MERCH_ORDER.SHIP_DATE, MERCH_ORDER.PONO,
MERCH_ORDER.SUBPP, MERCH_ORDER.PJNO, BUYER.B_NAME, BUYER.B_AJENT,
MERCH_ORDER.ITEM, MERCH_ORDER.FABRIC, MERCH_ORDER.QUALITY, MERCH_ORDER.COMPOSITION,
MERCH_ORDER.P_SIZE, MERCH_ORDER.QUANTITY, MERCH_ORDER.Q_UNIT,
MERCH_ORDER.NETWHT, MERCH_ORDER.WT_UNIT, MERCH_ORDER.TERM, MERCH_ORDER.COMM,
MERCH_ORDER.PRICE, MERCH_ORDER.CUR_SYMB, MERCH_ORDER.STATUS, MERCH_ORDER.REMARKS,
MERCH_ORDER.WONO, MERCH_ORDER.PRONO, MERCH_ORDER.PES_QUANTITY,
MERCH_ORDER.PES_Q_UNIT, MERCH_ORDER.PES_PRICE, MERCH_ORDER.PES_CUR_SYMB
FROM BUYER, MERCH_ORDER
WHERE MERCH_ORDER.CANCEL IS NULL
AND (MERCH_ORDER.B_CODE = BUYER.B_CODE)
OR
there are no rows which conform to the joining condition between the two tables BUYER and MERCH_ORDER ....
Regards,
Simon
Maybe you are looking for
-
I sync 2.4 will not load
iSync for the ical and address book are working fine. the iSync for the phone will not load. and it has a generic Icon. version 2.4. I have: reinstalled the isync. via update. deleted it and reinstalled via update. Tiger disk will not do a isync only
-
SQL Query Updateable Report Error on Submit
I'm using AppX version 2.0.0.00.49 and have modified a standard SQL report into an updateable report that modifies the value of a single column. When I submit the page, I receive this error: Error in mru internal routine: ORA-20001: Error in MRU: row
-
SPED functionality in ECC 6.0
Can anybody throw some light on the new SPED functionality in ECC 6.0. Would appreciate any documentation. Thanks Andrew
-
Hello all, I am very new to oracle and i wanted to know if i have a full level 0 rman backup (with archive log ) that i have taken today(2/9/2012 13:00 pm cst).... and now the questions is... is there anyway possible or is oracle is capable of rollin
-
I'm trying to create a page that displays the results of a query, but will allow you to filter them by date, name, etc. I'm relatively new to this concept, but reading a bit online I found some examples that did what I'm trying to do. I have a few qu