Need help in improving performance of prorating quantities to stores for existing orders
I have code written to allocate quantities to stores for an existing order. Suppose there is a supplier order with a quantity of 100 that needs to be distributed among 4 stores which have demands of 50, 40, 30 and 20. Since the total demand is not equal to the available quantity, the available quantity needs to be allocated to the stores using an algorithm.
The algorithm allocates quantity to the stores in small pieces of innersize. Innersize is the
quantity within the pack of packs, i.e. a pack has 4 pieces and each piece internally has 10 pieces;
this 10 is called the innersize.
While allocating, each store is provided quantities of innersize first and this looping continues
until available quantity is over
Ex:
store1=10
store2=10
store3=10
store4=10
second time:
store1=10(old)+10
store2=10(old)+10
store3=10(old)+10
store4=10(old)+10--demand fulfilled
third time
store1=20(old)+10
store2=20(old)+10
-- available quantity is over and hence stopped.
My code below-
=================================================
int prorate_allocation()
char *function = "prorate_allocation";
long t_cnt_st;
int t_innersize;
int t_qty_ordered;
int t_cnt_lp;
bool t_complete;
sql_cursor alloc_cursor;
EXEC SQL DECLARE c_order CURSOR FOR -- cursor to get orders, item in that, inner size and available qty.
SELECT oh.order_no,
ol.item,
isc.inner_pack_size,
ol.qty_ordered
FROM ABRL_ALC_CHG_TEMP_ORDHEAD oh,
ordloc ol,
item_supp_country isc
WHERE oh.order_no=ol.order_no
AND oh.supplier=isc.supplier
and ol.item=isc.item
AND EXISTS (SELECT 1 FROM abrl_alc_chg_details aacd WHERE oh.order_no=aacd.order_no)
AND ol.qty_ordered>0;
char v_order_no[10];
char v_item[25];
double v_innersize;
char v_qty_ordered[12];
char v_alloc_no[11];
char v_location[10];
char v_qty_allocated[12];
int *store_quantities;
bool *store_processed_flag;
EXEC SQL OPEN c_order;
if (SQL_ERROR_FOUND)
sprintf(err_data,"CURSOR OPEN: cursor=c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL ALLOCATE :alloc_cursor;
while(1)
EXEC SQL FETCH c_order INTO :v_order_no,
:v_item,
:v_innersize,
:v_qty_ordered;
if (SQL_ERROR_FOUND)
sprintf(err_data,"CURSOR FETCH: cursor=c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
if (NO_DATA_FOUND) break;
t_qty_ordered =atoi(v_qty_ordered);
t_innersize =(int)v_innersize;
t_cnt_lp = t_qty_ordered/t_innersize;
t_complete =FALSE;
EXEC SQL SELECT COUNT(*) INTO :t_cnt_st
FROM abrl_alc_chg_ad ad,
alloc_header ah
WHERE ah.alloc_no=ad.alloc_no
AND ah.order_no=:v_order_no
AND ah.item=:v_item
AND ad.qty_allocated!=0;
if SQL_ERROR_FOUND
sprintf(err_data,"SELECT: ALLOC_DETAIL, count = %s\n",t_cnt_st);
strcpy(table,"ALLOC_DETAIL");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
if (t_cnt_st>0)
store_quantities=(int *) calloc(t_cnt_st,sizeof(int));
store_processed_flag=(bool *) calloc(t_cnt_st,sizeof(bool));
EXEC SQL EXECUTE
BEGIN
OPEN :alloc_cursor FOR SELECT ad.alloc_no,
ad.to_loc,
ad.qty_allocated
FROM alloc_header ah,
abrl_alc_chg_ad ad
WHERE ah.alloc_no=ad.alloc_no
AND ah.item=:v_item
AND ah.order_no=:v_order_no
order by ad.qty_allocated desc;
END;
END-EXEC;
while (t_cnt_lp>0)
EXEC SQL WHENEVER NOT FOUND DO break;
for(int i=0;i<t_cnt_st;i++)
EXEC SQL FETCH :alloc_cursor INTO :v_alloc_no,
:v_location,
:v_qty_allocated;
if (store_quantities[i]!=(int)v_qty_allocated)
store_quantities[i]=store_quantities[i]+t_innersize;
t_cnt_lp--;
if (t_cnt_lp==0)
EXEC SQL CLOSE :alloc_cursor;
break;
else
if(store_processed_flag[i]==FALSE)
store_processed_flag[i]=TRUE;
t_cnt_st--;
if (t_cnt_st==0)
t_complete=TRUE;
break;
if (t_complete==TRUE && t_cnt_lp!=0)
for (int i=0;i<t_cnt_st;i++)
store_quantities[i]=store_quantities[i]+v_innersize;
t_cnt_lp--;
if (t_cnt_lp==0)
EXEC SQL CLOSE :alloc_cursor;
break;
}/*END OF WHILE*/
EXEC SQL EXECUTE
BEGIN
OPEN :alloc_cursor FOR SELECT ad.alloc_no,
ad.to_loc,
ad.qty_allocated
FROM alloc_header ah,
abrl_alc_chg_ad ad
WHERE ah.alloc_no=ad.alloc_no
AND ah.item=:v_item
AND ah.order_no=:v_order_no
order by ad.qty_allocated desc;
END;
END-EXEC;
EXEC SQL WHENEVER NOT FOUND DO break;
for (int i=0;i<t_cnt_st;i++)
EXEC SQL FETCH :alloc_cursor INTO :v_alloc_no,
:v_location,
:v_qty_allocated;
EXEC SQL UPDATE abrl_alc_chg_ad
SET qty_allocated=:store_quantities[i]
WHERE to_loc=:v_location
AND alloc_no=:v_alloc_no;
if SQL_ERROR_FOUND
sprintf(err_data,"UPDATE: ALLOC_DETAIL, location = %s , alloc_no =%s\n", v_location,v_alloc_no);
strcpy(table,"ALLOC_DETAIL");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL UPDATE ABRL_ALC_CHG_DETAILS
SET PROCESSED='Y'
WHERE LOCATION=:v_location
AND alloc_no=:v_alloc_no
AND PROCESSED IN ('E','U');
if SQL_ERROR_FOUND
sprintf(err_data,"UPDATE: ABRL_ALC_CHG_DETAILS, location = %s , alloc_no =%s\n", v_location,v_alloc_no);
strcpy(table,"ABRL_ALC_CHG_DETAILS");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL COMMIT;
EXEC SQL CLOSE :alloc_cursor;
free(store_quantities);
free(store_processed_flag);
}/*END OF IF*/
}/*END OF OUTER WHILE LOOP*/
EXEC SQL CLOSE c_order;
if SQL_ERROR_FOUND
sprintf(err_data,"CURSOR CLOSE: cursor = c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
return(0);
} /* end prorate_allocation*/
I have a code written to allocate quantities to stores for an existing order. Suppose there is a supplier order with quantity of 100 and this needs to distributed among 4 stores which has a demand of 50,40,30 and 20. Since total demand not equal to available quantity. the available quantity needs to be allocated to stores using an algorithm.
ALgorithm is like allocating the stores in small pieces of innersize. Innersize is nothing but
quantity within the pack of packs i.e. pack has 4 pieces and each pieces internally has 10 pieces,
this 10 is called innersize.
While allocating, each store is provided quantities of innersize first and this looping continues
until available quantity is over
Ex:
store1=10
store2=10
store3=10
store4=10
second time:
store1=10(old)+10
store2=10(old)+10
store3=10(old)+10
store4=10(old)+10--demand fulfilled
third time
store1=20(old)+10
store2=20(old)+10
-- available quantity is over and hence stopped.
My code below-
=================================================
int prorate_allocation()
char *function = "prorate_allocation";
long t_cnt_st;
int t_innersize;
int t_qty_ordered;
int t_cnt_lp;
bool t_complete;
sql_cursor alloc_cursor;
EXEC SQL DECLARE c_order CURSOR FOR -- cursor to get orders, item in that, inner size and available qty.
SELECT oh.order_no,
ol.item,
isc.inner_pack_size,
ol.qty_ordered
FROM ABRL_ALC_CHG_TEMP_ORDHEAD oh,
ordloc ol,
item_supp_country isc
WHERE oh.order_no=ol.order_no
AND oh.supplier=isc.supplier
and ol.item=isc.item
AND EXISTS (SELECT 1 FROM abrl_alc_chg_details aacd WHERE oh.order_no=aacd.order_no)
AND ol.qty_ordered>0;
char v_order_no[10];
char v_item[25];
double v_innersize;
char v_qty_ordered[12];
char v_alloc_no[11];
char v_location[10];
char v_qty_allocated[12];
int *store_quantities;
bool *store_processed_flag;
EXEC SQL OPEN c_order;
if (SQL_ERROR_FOUND)
sprintf(err_data,"CURSOR OPEN: cursor=c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL ALLOCATE :alloc_cursor;
while(1)
EXEC SQL FETCH c_order INTO :v_order_no,
:v_item,
:v_innersize,
:v_qty_ordered;
if (SQL_ERROR_FOUND)
sprintf(err_data,"CURSOR FETCH: cursor=c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
if (NO_DATA_FOUND) break;
t_qty_ordered =atoi(v_qty_ordered);
t_innersize =(int)v_innersize;
t_cnt_lp = t_qty_ordered/t_innersize;
t_complete =FALSE;
EXEC SQL SELECT COUNT(*) INTO :t_cnt_st
FROM abrl_alc_chg_ad ad,
alloc_header ah
WHERE ah.alloc_no=ad.alloc_no
AND ah.order_no=:v_order_no
AND ah.item=:v_item
AND ad.qty_allocated!=0;
if SQL_ERROR_FOUND
sprintf(err_data,"SELECT: ALLOC_DETAIL, count = %s\n",t_cnt_st);
strcpy(table,"ALLOC_DETAIL");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
if (t_cnt_st>0)
store_quantities=(int *) calloc(t_cnt_st,sizeof(int));
store_processed_flag=(bool *) calloc(t_cnt_st,sizeof(bool));
EXEC SQL EXECUTE
BEGIN
OPEN :alloc_cursor FOR SELECT ad.alloc_no,
ad.to_loc,
ad.qty_allocated
FROM alloc_header ah,
abrl_alc_chg_ad ad
WHERE ah.alloc_no=ad.alloc_no
AND ah.item=:v_item
AND ah.order_no=:v_order_no
order by ad.qty_allocated desc;
END;
END-EXEC;
while (t_cnt_lp>0)
EXEC SQL WHENEVER NOT FOUND DO break;
for(int i=0;i<t_cnt_st;i++)
EXEC SQL FETCH :alloc_cursor INTO :v_alloc_no,
:v_location,
:v_qty_allocated;
if (store_quantities[i]!=(int)v_qty_allocated)
store_quantities[i]=store_quantities[i]+t_innersize;
t_cnt_lp--;
if (t_cnt_lp==0)
EXEC SQL CLOSE :alloc_cursor;
break;
else
if(store_processed_flag[i]==FALSE)
store_processed_flag[i]=TRUE;
t_cnt_st--;
if (t_cnt_st==0)
t_complete=TRUE;
break;
if (t_complete==TRUE && t_cnt_lp!=0)
for (int i=0;i<t_cnt_st;i++)
store_quantities[i]=store_quantities[i]+v_innersize;
t_cnt_lp--;
if (t_cnt_lp==0)
EXEC SQL CLOSE :alloc_cursor;
break;
}/*END OF WHILE*/
EXEC SQL EXECUTE
BEGIN
OPEN :alloc_cursor FOR SELECT ad.alloc_no,
ad.to_loc,
ad.qty_allocated
FROM alloc_header ah,
abrl_alc_chg_ad ad
WHERE ah.alloc_no=ad.alloc_no
AND ah.item=:v_item
AND ah.order_no=:v_order_no
order by ad.qty_allocated desc;
END;
END-EXEC;
EXEC SQL WHENEVER NOT FOUND DO break;
for (int i=0;i<t_cnt_st;i++)
EXEC SQL FETCH :alloc_cursor INTO :v_alloc_no,
:v_location,
:v_qty_allocated;
EXEC SQL UPDATE abrl_alc_chg_ad
SET qty_allocated=:store_quantities[i]
WHERE to_loc=:v_location
AND alloc_no=:v_alloc_no;
if SQL_ERROR_FOUND
sprintf(err_data,"UPDATE: ALLOC_DETAIL, location = %s , alloc_no =%s\n", v_location,v_alloc_no);
strcpy(table,"ALLOC_DETAIL");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL UPDATE ABRL_ALC_CHG_DETAILS
SET PROCESSED='Y'
WHERE LOCATION=:v_location
AND alloc_no=:v_alloc_no
AND PROCESSED IN ('E','U');
if SQL_ERROR_FOUND
sprintf(err_data,"UPDATE: ABRL_ALC_CHG_DETAILS, location = %s , alloc_no =%s\n", v_location,v_alloc_no);
strcpy(table,"ABRL_ALC_CHG_DETAILS");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
EXEC SQL COMMIT;
EXEC SQL CLOSE :alloc_cursor;
free(store_quantities);
free(store_processed_flag);
}/*END OF IF*/
}/*END OF OUTER WHILE LOOP*/
EXEC SQL CLOSE c_order;
if SQL_ERROR_FOUND
sprintf(err_data,"CURSOR CLOSE: cursor = c_order");
strcpy(table,"ORDHEAD, ORDLOC, ITEM_SUPP_COUNTRY");
WRITE_ERROR(SQLCODE,function,table,err_data);
return(-1);
return(0);
} /* end prorate_allocation*/
Similar Messages
-
EP6 sp12 Performance Issue, Need help to improve performance
We have a Portal development environment with EP6.0 sp12.
What we are experiencing is performance issue, It's not extremely slow, but slow compared to normal ( compared to our prod box). For example, after putting the username and password and clicking the <Log on> Button it's taking more than 10 secs for the first home page to appear. Also currently we have hooked the Portal with 3 xAPPS system and one BW system. The time taken for a BW query to appear ( with selection screen) is also more than 10 secs. However access to one other xAPPS is comparatively faster.
Do we have a simple to use guide( Not a very elaborate one) with step by step guidance to immediately improve the performance of the Portal.
Simple guide, easy to implement, with immediate effect is what we are looking for in the short term
Thanks
ArunabhaHi Eric,
I have searched but didn't find the Portal Tuning and Optimization Guide as you have suggested, Can you help to find this.
Subrato,
This is good and I would obviously read through this, The issue here is this is only for Network.
But do you know any other guide, which as very basic ( may be 10 steps) and show step by step the process, it would be very helpful. I already have some information from the thread Portal Performance - page loads slow, client cache reset/cleared too often
But really looking for answer ( steps to do it quickly and effectively) instead of list of various guides.
It would be very helpful if you or anybody( who has actually done some performance tuning) can send a basic list of steps that I can do immediately, instead of reading through these large guides.
I know I am looking for a shortcut, but this is the need of the hour.
Thanks
Arun -
RE: Need help to improve performance!!
Hi Experts,
There is an standard SAP tcode FPREPT which is to re-print a receipt. The execution of the Query time takes 5+ minutes.
Can anybody suggest me the best way to improve this and if hlp me with any SAP note available for the same.
vishalHi,
Check this note
Note 607651 - FPREPT/FPY1: Performance for receipt number assignment
It is a old one for release 471 (FI-CA)
What is your release ?
Regards -
I need help getting my os x mountain lion to work for my pro tools
I need help getting my os x mountain lion to work for my pro tools
Since you provide no details I can do nothing but guess, so perhaps this will help:
http://avid.force.com/pkb/articles/en_US/how_to/Upgrading-to-Mac-OS-10-8?popup=t rue&NewLang=en&DocType=1080
http://avid.force.com/pkb/articles/en_US/compatibility/Avid-Software-and-Mac-OS- X-10-8?popup=true&NewLang=en&DocType=1083
If you continue to have problems, you probably should contact Avid support.
Regards. -
Need help w/ setting up ports to run a server for America's Army
Need help w/ setting up ports to run a server for America's Army. I read wat u need to change the ports but i dont understand wat to put. here is wat the site says
Q: How do I run my own server?
A: Quick and dirty server info:
1. Edit RunServer.bat to change the map.
2. Run RunServer.bat
Or:
server.exe LAN MAPNAME.aao (Host a LAN game)
server.exe global MAPNAME.aao (Host a Public game)
Also: When you create a server setup and want to allow other users to join your server, you need make sure the following ports are open for outgoing and incoming traffic in your firewall: 1716 (UDP), 1717 (UDP), 20025-20045 (TCP), and 20047 (TCP). Failure to open these ports will prevent the server from accepting connections from other players or prevent other players from being able to see your server online.
There are several settings that also need to be defined in your server configuration INI file (in the Windows version, these files are located in “My Documents\America’s Army Server Settings\{settings file name}.ini”).
[Engine.GameEngine]
ServerActors=Andromeda.AndromedaMBS
[Andromeda.Andromeda]
GameServerIp=
Make sure that you set the actual IP address of the America’s Army Server under GameServerIp= (for example, “GameServerIp=000.000.000.000”). The supplied address must be your actual internet IP address, if this is left blank or you supply the IP address for your internal network (such as 192.168.0.x), your server will not be able to accept connections from the internet.
If your server.ini file contains the setting shown below, please change the QueryPort setting to 20025. This setting can also be removed, as the default setting is port 20025.
[Andromeda.AndromedaMBS]
QueryPort=20025
Punkbuster user fix correction.
If [Engine.GameEngine] block has been changed to read as below:
[Engine.GameEngine]
ServerActors=IPDrv.AndromedaMBS
Please add the following block to your INI file:
[IpDrv.AndromedaMBS]
QueryPort=20025
(Last Updated: 2006-04-20)Your images are not stored in the catalog. They are stored in folders on your computer. If you imported images that were already on your computer using the "Add" Option they are still in that same folder. If you imported images from your camera then they are in the folders that you specified when you imported. The catalog points to those images wherever they are located, and records all of the adjustments that you make to the image. When you send an image to Photoshop for further editing and save that image in Photoshop, it is normally saved back in the same folder as the original image.
Images are not "saved" in Lightroom. The basic default workflow in Lightroom is to store all of the adjustments in the catalog, leaving the original image completely unmodified. The catalog becomes the central controlling mechanism. It is a database that contains pointers to where the images are located and a record of all adjustments made to those images using Lightroom. Properly managed, you only have those original master files and secondary files for the ones that you have sent to Photoshop for further adjustment. When you want to provide a copy for someone else, you use the export dialogue for that purpose. I often export JPEG images to share with others or to post on the web. After I have usedthe JPEG for its intended purpose I delete it. -
Need help in improving the performance for the sql query
Thanks in advance for helping me.
I was trying to improve the performance of the query below. I tried the following methods: used MERGE instead of UPDATE, used bulk collect / FORALL update, used the ORDERED hint, and created a temp table and updated the target table from it. None of the methods I used improved performance. The row count updated in the target table is 2 million and the target table has 15 million rows.
Any suggestions or solutions for improving performance are appreciated
SQL query:
update targettable tt
set mnop = 'G',
where ( x,y,z ) in
select a.x, a.y,a.z
from table1 a
where (a.x, a.y,a.z) not in (
select b.x,b.y,b.z
from table2 b
where 'O' = b.defg
and mnop = 'P'
and hijkl = 'UVW';987981 wrote:
I was trying to improve the performance of the below query. I tried the following methods used merge instead of update, used bulk collect / Forall update, used ordered hint, created a temp table and upadated the target table using the same. The methods which I used did not improve any performance. And that meant what? Surely if you spend all that time and effort to try various approaches, it should mean something? Failures are as important teachers as successes. You need to learn from failures too. :-)
The data count which is updated in the target table is 2 million records and the target table has 15 million records.Tables have rows btw, not records. Database people tend to get upset when rows are called records, as records exist in files and a database is not a mere collection of records and files.
The failure to find a single faster method with the approaches you tried, points to that you do not know what the actual performance problem is. And without knowing the problem, you still went ahead, guns blazing.
The very first step in dealing with any software engineering problem, is to identify the problem. Seeing the symptoms (slow performance) is still a long way from problem identification.
Part of identifying the performance problem, is understanding the workload. Just what does the code task the database to do?
From your comments, it needs to find 2 million rows from 15 million rows. Change these rows. And then write 2 million rows back to disk.
That is not a small workload. Simple example. Let's say that the 2 million row find is 1ms/row and the 2 million row write is also 1ms/row. This means a 66 minute workload. Due to the number of rows, an increase in time/row either way, will potentially have 2 million fold impact.
So where is the performance problem? Time spend finding the 2 million rows (where other tables need to be read, indexes used, etc)? Time spend writing the 2 million rows (where triggers and indexes need to be fired and maintained)? Both? -
Need Help with site performance
Looking for Help..
In particular we would like help from experts in ssl, browser experts
(how browsers handle encryption, de-encryption), iPlanet experts, Sun
crypto card experts, webdesign for performance experts.
Our website is hosted on a Sun Enterprise 450 server running Solaris v7
The machine is hosted at Exodus. These are the following software
servers that perform the core functions of the website:
iPlanet Web Server v. 4.1 ( Java server is enabled)
IBM db2 v. 7.1
SAA uses SmartSite, a proprietary system developed by Adaptations
(www.adaptations.com). At the level of individual HTML pages, SmartSite
uses
proprietary markup tags and Tcl code embedded in HTML comments to
publish
content stored in a database. SmartSite allows for control over when,
how and
to whom content appears. It is implemented as a java servlet which
stores its data on the db2 server and uses a tcl like scripting language
(jacl- orginally developed by Sun)
CHALLENGE:
In late June this year we launched a redesigned website with ssl enabled
on all pages. (a departure from the previous practice of maintaining
most of the site on non-secure server and only some pages on a ssl
server). We also introduced a new website design with greater use of
images, nested tables and javascript.
We have found that the introduction of the "secure everywhere" policy
has had a detrimental effect on the web site user experience, due to
decreased web server and web browser performance. In other words, the
site got slower. Specifically, we have
identified the following problems:
1. Web server performance degradation. Due to unidentified increases in
web
server resource demand caused (probably) by the global usage of SSL, the
web
server experienced instability. This was resolved by increasing the
amount of
operating system (OS) resources available to the server.
2. Web browser performance degradation. Several categories are noted:
2.1. Page load and rendering. Page load and rendering time has
increased dramatically on the new site, particularly in the case of
Netscape Navigator. Some of this may be attributed to the usage of SSL.
Particularly, the rendering time of complex tables and images may be
markedly slower on slower client machines.
2.2. Non-caching of content. Web browsers should not cache any content
derived from https on the local hard disk. The amount of RAM caching
ability varies form browser to browser, and machine to machine, but is
generally much less than for disk caching. In addition, some browser may
not cache content in RAM cache at all. The overall effect of reduced
caching is increased accesses to the web server to retrieve content.
This
will degrade server performance, as it services more content, and also
web browser performance, as it will spend more time waiting for page
content before and while rendering it.
Things that have been attempted to improve performance:
1) Reducing javascript redundancy (less compiling time required)
2) Optimizing HTML code (taking out nested tables, hard coding in specs
where possible to reduce compiling time)
3) Optimizing page content assembly (reducing routine redundancy,
enabling things to be compiled ahead of time)
4) Installing an encryption card (to speed page encryption rate) - was
removed as it did not seem to improve performance, but seemed to have
degraded performanceFred Martinez wrote:
Looking for Help..
In particular we would like help from experts in ssl, browser experts
(how browsers handle encryption, de-encryption), iPlanet experts, Sun
crypto card experts, webdesign for performance experts.
Our website is hosted on a Sun Enterprise 450 server running Solaris v7
The machine is hosted at Exodus. These are the following software
servers that perform the core functions of the website:
iPlanet Web Server v. 4.1 ( Java server is enabled)
IBM db2 v. 7.1
SAA uses SmartSite, a proprietary system developed by Adaptations
(www.adaptations.com). Since I don't see iPlanet's application server in the mix here this (a
newsgroup
for performance questions for iAS) is not the newsgroup to ask in.
Kent -
Need help in copying Invoice date to lower level item in Sales order report
Hello Experts,
I am debugging into one Sales order report.I need little bit help.The report is displaying Invoice Date for
Sales order Billing documents for Higher item in Bill of Material Structures.But as per user requirement,
I am supposed to show the Invoice date for lower level items also.The field for Higher level item is 'UEPOS'.
I want to copy the Invoice date for Higher level item to lower level item. Can you please guide me in the logic?
Thanking you in anticipation.
Best Regards,
HarishHi BreakPoint,
Thanks for the information.
I have applied the same way but it is showing only lower line items now.
Invoice dates for Higher level items are not there.
I am pasting the code here which I have applied.
Then you can give me more guidence.
This is to be done only for 'ZREP' sales orders.
if w_vbak-auart EQ 'ZREP' and w_vbak-uepos is not INITIAL.
read table t_final into w_final_ZREP with key vbeln = w_vbak-vbeln
posnr = w_vbak-uepos.
w_final-erdat_i = w_final_ZREP-erdat_i.
else.
if w_vbak-auart EQ 'ZREP' and w_vbak-uepos is INITIAL.
w_final-erdat_i = w_invdate.
endif.
endif.
Can you please sugest me changes here?
Best Regards,
Harish
Edited by: joshihaa on Jul 13, 2010 6:22 PM -
Need help... getting an error message "not enough storage" for icloud and it will not let me get passed the message
See if this helps.
Reboot the iPad by holding down on the sleep and home buttons at the same time for about 10-15 seconds until the Apple Logo appears - ignore the red slider - let go of the buttons. -
Need help installing adobe flash on mountain lion o/s for my mac
need help installing adobe flash on my mac with mountain lion operating system
What kind of problem are you running into? Have you gone to http://get.adobe.com/flashplayer?
Here's a help document that might assist you with common problems. If you continue to encounter problems, please post back.
Macintosh Installation Help -
Hi need help with cant buy game add on can buy from iTunes but not any game add on need help
To Contact iTunes Customer Service and request assistance
Use this Link > Apple Support iTunes Store Contact
Note:
Some In-App Purchases require a Credit Card. -
RKKBABS0 Performance Issues (Background Processing of CO99) for PM Orders
We are experiencing extremely long run times when batch processing through program RKKBABS0 in ECC 6.0 (just upgraded). The issue appears to be that the program is using the production order numbers to search against the EXKN table which contains no AUFNR or AUFPL information.
Has anyone experienced this same issue and how was it resolved?
Edited by: Ken Lundeen on Apr 9, 2010 9:17 PM
Edited by: Ken Lundeen on Apr 9, 2010 9:17 PM Table ESKN(I'm sorry you've waited over a year for a reply.)
We also have performance issue. In our case we do not use Service Entry sheets with maintenance or production orders; AUFNR will not be populated in table ESKN. We are unable to 'complete business' our maintenance and production orders using batch processing because of performance.
We use Oracle database, which uses full table scan in this situation. But Secondary index (MANDT and AUFNR) is of no value anyway, we have about 12 million records with client and blank AUFNR field.
Our solution is a combination of a modification and a new index. OSS pilot note "1532483 - Performance of RKKBABS0 CHECK_ENTRYSHEET when reading ESKN" is a modification which introduces code improvements especially if running in background and closing several orders. Because we only have one client, we also created a new index consisting only of AUFNR. Oracle will not add a row to the secondary index of all fields of the index are null, making our new index very small. We then udpated Oracle stats to ensure Oracle would choose our new index.
We can now 'complete business' a single order online in under a minute, and the batch program runs much more efficiently.
This is not a perfect solution, but it has been a useful workaround for us. I hope this is useful to you. -
Needed help to improve the performance of a select query?
Hi,
I have been preparing a report which involves data to be fetched from 4 to 5 different tables and calculation has to performed on some columns also,
i planned to write a single cursor to populate 1 temp table.i have used INLINE VIEW,EXISTS more frequently in the select query..please go through the query and suggest me a better way to restructure the query.
cursor c_acc_pickup_incr(p_branch_code varchar2, p_applDate date, p_st_dt date, p_ed_dt date) is
select sca.branch_code "BRANCH",
sca.cust_ac_no "ACCOUNT",
to_char(p_applDate, 'YYYYMM') "YEARMONTH",
sca.ccy "CURRENCY",
sca.account_class "PRODUCT",
sca.cust_no "CUSTOMER",
sca.ac_desc "DESCRIPTION",
null "LOW_BAL",
null "HIGH_BAL",
null "AVG_CR_BAL",
null "AVG_DR_BAL",
null "CR_DAYS",
null "DR_DAYS",
--null "CR_TURNOVER",
--null "DR_TURNOVER",
null "DR_OD_DAYS",
(select sum(gf.limit_amount * (scal.linkage_percentage / 100)) +
(case when (p_applDate >= sca.tod_limit_start_date and
p_applDate <= nvl(sca.tod_limit_end_date, p_applDate)) then
sca.tod_limit else 0 end) dd
from getm_facility gf, sttm_cust_account_linkages scal
where gf.line_code || gf.line_serial = scal.linked_ref_no
and cust_ac_no = sca.cust_ac_no) "OD_LIMIT",
--sc.credit_rating "CR_GRADE",
null "AVG_NET_BAL",
null "UNAUTH_OD_AMT",
sca.acy_blocked_amount "AMT_BLOCKED",
(select sum(amt)
from ictb_entries_history ieh
where ieh.acc = sca.cust_ac_no
and ieh.brn = sca.branch_code
and ieh.drcr = 'D'
and ieh.liqn = 'Y'
and ieh.entry_passed = 'Y'
and ieh.ent_dt between p_st_dt and p_ed_dt
and exists (
select * from ictm_pr_int ipi, ictm_rule_frm irf
where ipi.product_code = ieh.prod
and ipi.rule = irf.rule_id
and irf.book_flag = 'B')) "DR_INTEREST",
(select sum(amt)
from ictb_entries_history ieh
where ieh.acc = sca.cust_ac_no
and ieh.brn = sca.branch_code
and ieh.drcr = 'C'
and ieh.liqn = 'Y'
and ieh.entry_passed = 'Y'
and ieh.ent_dt between p_st_dt and p_ed_dt
and exists (
select * from ictm_pr_int ipi, ictm_rule_frm irf
where ipi.product_code = ieh.prod
and ipi.rule = irf.rule_id
and irf.book_flag = 'B')) "CR_INTEREST",
(select sum(amt) from ictb_entries_history ieh
where ieh.brn = sca.branch_code
and ieh.acc = sca.cust_ac_no
and ieh.ent_dt between p_st_dt and p_ed_dt
and exists (
select product_code
from ictm_product_definition ipd
where ipd.product_code = ieh.prod
and ipd.product_type = 'C')) "FEE_INCOME",
sca.record_stat "ACC_STATUS",
case when (trunc(sca.ac_open_date,'MM') = trunc(p_applDate,'MM')
and not exists (select 1
from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null))
then 1 else 0 end "NEW_ACC_FOR_THE_MONTH",
case when (trunc(sca.ac_open_date,'MM') = trunc(p_applDate,'MM')
and trunc(sc.cif_creation_date,'MM') = trunc(p_applDate,'MM')
and not exists (select 1
from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null))
then 1 else 0 end "NEW_ACC_FOR_NEW_CUST",
(select 1 from dual
where exists (select 1 from ictm_td_closure_renew itcr
where itcr.brn = sca.branch_code
and itcr.acc = sca.cust_ac_no
and itcr.renewal_date = sysdate)
or exists (select 1 from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null)) "RENEWED_OR_ROLLOVER",
(select maturity_date from ictm_acc ia
where ia.brn = sca.branch_code
and ia.acc = sca.cust_ac_no) "MATURITY_DATE",
sca.ac_stat_no_dr "DR_DISALLOWED",
sca.ac_stat_no_cr "CR_DISALLOWED",
sca.ac_stat_block "BLOCKED_ACC", Not Reqd
sca.ac_stat_dormant "DORMANT_ACC",
sca.ac_stat_stop_pay "STOP_PAY_ACC", --New
sca.ac_stat_frozen "FROZEN_ACC",
sca.ac_open_date "ACC_OPENING_DT",
sca.address1 "ADD_LINE_1",
sca.address2 "ADD_LINE_2",
sca.address3 "ADD_LINE_3",
sca.address4 "ADD_LINE_4",
sca.joint_ac_indicator "JOINT_ACC",
sca.acy_avl_bal "CR_BAL",
0 "DR_BAL",
0 "CR_BAL_LCY", t
0 "DR_BAL_LCY",
null "YTD_CR_MOVEMENT",
null "YTD_DR_MOVEMENT",
null "YTD_CR_MOVEMENT_LCY",
null "YTD_DR_MOVEMENT_LCY",
null "MTD_CR_MOVEMENT",
null "MTD_DR_MOVEMENT",
null "MTD_CR_MOVEMENT_LCY",
null "MTD_DR_MOVEMENT_LCY",
'N' "BRANCH_TRFR", --New
sca.provision_amount "PROVISION_AMT",
sca.account_type "ACCOUNT_TYPE",
nvl(sca.tod_limit, 0) "TOD_LIMIT",
nvl(sca.sublimit, 0) "SUB_LIMIT",
nvl(sca.tod_limit_start_date, global.min_date) "TOD_START_DATE",
nvl(sca.tod_limit_end_date, global.max_date) "TOD_END_DATE"
from sttm_cust_account sca, sttm_customer sc
where sca.branch_code = p_branch_code
and sca.cust_no = sc.customer_no
and ( exists (select 1 from actb_daily_log adl
where adl.ac_no = sca.cust_ac_no
and adl.ac_branch = sca.branch_code
and adl.trn_dt = p_applDate
and adl.auth_stat = 'A')
or exists (select 1 from catm_amount_blocks cab
where cab.account = sca.cust_ac_no
and cab.branch = sca.branch_code
and cab.effective_date = p_applDate
and cab.auth_stat = 'A')
or exists (select 1 from ictm_td_closure_renew itcr
where itcr.acc = sca.cust_ac_no
and itcr.brn = sca.branch_code
and itcr.renewal_date = p_applDate)
or exists (select 1 from sttm_ac_stat_change sasc
where sasc.cust_ac_no = sca.cust_ac_no
and sasc.branch_code = sca.branch_code
and sasc.status_change_date = p_applDate
and sasc.auth_stat = 'A')
or exists (select 1 from cstb_acc_brn_trfr_log cabtl
where cabtl.branch_code = sca.branch_code
and cabtl.cust_ac_no = sca.cust_ac_no
and cabtl.process_status = 'S'
and cabtl.process_date = p_applDate)
or exists (select 1 from sttbs_provision_history sph
where sph.branch_code = sca.branch_code
and sph.cust_ac_no = sca.cust_ac_no
and sph.esn_date = p_applDate)
or exists (select 1 from sttms_cust_account_dormancy scad
where scad.branch_code = sca.branch_code
and scad.cust_ac_no = sca.cust_ac_no
and scad.dormancy_start_dt = p_applDate)
or sca.maker_dt_stamp = p_applDate
or sca.status_since = p_applDate
l_tb_acc_det ty_tb_acc_det_int;
l_brnrec cvpks_utils.rec_brnlcy;
l_acbr_lcy sttms_branch.branch_lcy%type;
l_lcy_amount actbs_daily_log.lcy_amount%type;
l_xrate number;
l_dt_rec sttm_dates%rowtype;
l_acc_rec sttm_cust_account%rowtype;
l_acc_stat_row ty_r_acc_stat;
Edited by: user13710379 on Jan 7, 2012 12:18 AM

I see it more like shown below (possibly with no inline selects).
Try to get rid of the remaining inline selects ( left as an exercise ;) )
and rewrite traditional joins as ansi joins as problems might arise using mixed syntax as I have to leave so I don't have time to complete the query
select sca.branch_code "BRANCH",
sca.cust_ac_no "ACCOUNT",
to_char(p_applDate, 'YYYYMM') "YEARMONTH",
sca.ccy "CURRENCY",
sca.account_class "PRODUCT",
sca.cust_no "CUSTOMER",
sca.ac_desc "DESCRIPTION",
null "LOW_BAL",
null "HIGH_BAL",
null "AVG_CR_BAL",
null "AVG_DR_BAL",
null "CR_DAYS",
null "DR_DAYS",
-- null "CR_TURNOVER",
-- null "DR_TURNOVER",
null "DR_OD_DAYS",
w.dd "OD_LIMIT",
-- sc.credit_rating "CR_GRADE",
null "AVG_NET_BAL",
null "UNAUTH_OD_AMT",
sca.acy_blocked_amount "AMT_BLOCKED",
x.dr_int "DR_INTEREST",
x.cr_int "CR_INTEREST",
y.fee_amt "FEE_INCOME",
sca.record_stat "ACC_STATUS",
case when trunc(sca.ac_open_date,'MM') = trunc(p_applDate,'MM')
and not exists(select 1
from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null
then 1
else 0
end "NEW_ACC_FOR_THE_MONTH",
case when (trunc(sca.ac_open_date,'MM') = trunc(p_applDate,'MM')
and trunc(sc.cif_creation_date,'MM') = trunc(p_applDate,'MM')
and not exists(select 1
from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null
then 1
else 0
end "NEW_ACC_FOR_NEW_CUST",
(select 1 from dual
where exists(select 1
from ictm_td_closure_renew itcr
where itcr.brn = sca.branch_code
and itcr.acc = sca.cust_ac_no
and itcr.renewal_date = sysdate
or exists(select 1
from ictm_tdpayin_details itd
where itd.multimode_payopt = 'Y'
and itd.brn = sca.branch_code
and itd.acc = sca.cust_ac_no
and itd.multimode_offset_brn is not null
and itd.multimode_tdoffset_acc is not null
) "RENEWED_OR_ROLLOVER",
m.maturity_date "MATURITY_DATE",
sca.ac_stat_no_dr "DR_DISALLOWED",
sca.ac_stat_no_cr "CR_DISALLOWED",
-- sca.ac_stat_block "BLOCKED_ACC", --Not Reqd
sca.ac_stat_dormant "DORMANT_ACC",
sca.ac_stat_stop_pay "STOP_PAY_ACC", --New
sca.ac_stat_frozen "FROZEN_ACC",
sca.ac_open_date "ACC_OPENING_DT",
sca.address1 "ADD_LINE_1",
sca.address2 "ADD_LINE_2",
sca.address3 "ADD_LINE_3",
sca.address4 "ADD_LINE_4",
sca.joint_ac_indicator "JOINT_ACC",
sca.acy_avl_bal "CR_BAL",
0 "DR_BAL",
0 "CR_BAL_LCY",
0 "DR_BAL_LCY",
null "YTD_CR_MOVEMENT",
null "YTD_DR_MOVEMENT",
null "YTD_CR_MOVEMENT_LCY",
null "YTD_DR_MOVEMENT_LCY",
null "MTD_CR_MOVEMENT",
null "MTD_DR_MOVEMENT",
null "MTD_CR_MOVEMENT_LCY",
null "MTD_DR_MOVEMENT_LCY",
'N' "BRANCH_TRFR", --New
sca.provision_amount "PROVISION_AMT",
sca.account_type "ACCOUNT_TYPE",
nvl(sca.tod_limit, 0) "TOD_LIMIT",
nvl(sca.sublimit, 0) "SUB_LIMIT",
nvl(sca.tod_limit_start_date, global.min_date) "TOD_START_DATE",
nvl(sca.tod_limit_end_date, global.max_date) "TOD_END_DATE"
from sttm_cust_account sca,
sttm_customer sc,
(select sca.cust_ac_no
sum(gf.limit_amount * (scal.linkage_percentage / 100)) +
case when p_applDate >= sca.tod_limit_start_date
and p_applDate <= nvl(sca.tod_limit_end_date, p_applDate)
then sca.tod_limit else 0
end
) dd
from sttm_cust_account sca
getm_facility gf,
sttm_cust_account_linkages scal
where gf.line_code || gf.line_serial = scal.linked_ref_no
and cust_ac_no = sca.cust_ac_no
group by sca.cust_ac_no
) w,
(select acc,
brn,
sum(decode(drcr,'D',amt)) dr_int,
sum(decode(drcr,'C',amt)) cr_int
from ictb_entries_history ieh
where ent_dt between p_st_dt and p_ed_dt
and drcr in ('C','D')
and liqn = 'Y'
and entry_passed = 'Y'
and exists(select null
from ictm_pr_int ipi,
ictm_rule_frm irf
where ipi.rule = irf.rule_id
and ipi.product_code = ieh.prod
and irf.book_flag = 'B'
group by acc,brn
) x,
(select acc,
brn,
sum(amt) fee_amt
from ictb_entries_history ieh
where ieh.ent_dt between p_st_dt and p_ed_dt
and exists(select product_code
from ictm_product_definition ipd
where ipd.product_code = ieh.prod
and ipd.product_type = 'C'
group by acc,brn
) y,
ictm_acc m,
(select sca.cust_ac_no,
sca.branch_code
coalesce(nvl2(coalesce(t1.ac_no,t1.ac_branch),'exists',null),
nvl2(coalesce(t2.account,t2.account),'exists',null),
nvl2(coalesce(t3.acc,t3.brn),'exists',null),
nvl2(coalesce(t4.cust_ac_no,t4.branch_code),'exists',null),
nvl2(coalesce(t5.cust_ac_no,t5.branch_code),'exists',null),
nvl2(coalesce(t6.cust_ac_no,t6.branch_code),'exists',null),
nvl2(coalesce(t7.cust_ac_no,t7.branch_code),'exists',null),
decode(sca.maker_dt_stamp,p_applDate,'exists'),
decode(sca.status_since,p_applDate,'exists')
) existence
from sttm_cust_account sca
left outer join
(select ac_no,ac_branch
from actb_daily_log
where trn_dt = p_applDate
and auth_stat = 'A'
) t1
on (sca.cust_ac_no = t1.ac_no
and sca.branch_code = t1.ac_branch
left outer join
(select account,account
from catm_amount_blocks
where effective_date = p_applDate
and auth_stat = 'A'
) t2
on (sca.cust_ac_no = t2.account
and sca.branch_code = t2.branch
left outer join
(select acc,brn
from ictm_td_closure_renew itcr
where renewal_date = p_applDate
) t3
on (sca.cust_ac_no = t3.acc
and sca.branch_code = t3.brn
left outer join
(select cust_ac_no,branch_code
from sttm_ac_stat_change
where status_change_date = p_applDate
and auth_stat = 'A'
) t4
on (sca.cust_ac_no = t4.cust_ac_no
and sca.branch_code = t4.branch_code
left outer join
(select cust_ac_no,branch_code
from cstb_acc_brn_trfr_log
where process_date = p_applDate
and process_status = 'S'
) t5
on (sca.cust_ac_no = t5.cust_ac_no
and sca.branch_code = t5.branch_code
left outer join
(select cust_ac_no,branch_code
from sttbs_provision_history
where esn_date = p_applDate
) t6
on (sca.cust_ac_no = t6.cust_ac_no
and sca.branch_code = t6.branch_code
left outer join
(select cust_ac_no,branch_code
from sttms_cust_account_dormancy
where dormancy_start_dt = p_applDate
) t7
on (sca.cust_ac_no = t7.cust_ac_no
and sca.branch_code = t7.branch_code
) z
where sca.branch_code = p_branch_code
and sca.cust_no = sc.customer_no
and sca.cust_ac_no = w.cust_ac_no
and sca.cust_ac_no = x.acc
and sca.branch_code = x.brn
and sca.cust_ac_no = y.acc
and sca.branch_code = y.brn
and sca.cust_ac_no = m.acc
and sca.branch_code = m.brn
and sca.cust_ac_no = z.sca.cust_ac_no
and sca.branch_code = z.branch_code
and z.existence is not null

Regards,
Etbin -
Need help troubleshooting poor performance loading cubes
I need ideas on how to troubleshoot performance issues we are having when loading our infocube. There are eight infopackages running in parallel to update the cube. Each infopackage can execute three datapackages at a time. The load performance is erratic. For example, if an infopackage needs five datapackages to load the data, data package 1 is sometimes the last one to complete. Sometimes the slow performance is in the Update Rules processing and other times it is on the Insert into the fact table.
Sometimes there are no performance problems and the load completes in 20 mins. Other times, the loads complete in 1.5+ hours.
Does anyone know how to tell which server a data package was executed on? Can someone tell me any transactions to use to monitor the loads while they are running to help pinpoint what the bottleneck is?
Thanks.
Regards,
Ryan

Some suggestions:
1. Collect BW statistics for all the cubes. Go to RSA1, navigate to the cube, and on the toolbar choose Tools - BW Statistics. Check the boxes to collect both OLAP and WHM.
2. Activate all the technical content cubes and reports and relevant objects. You will find them if you search with 0BWTC* in the business content.
3. Start loading data to the Technical content cubes.
4. There are a few reports out of these statistical cubes and run them and you will get some ideas.
5. Try to schedule sequentially instead of parallel loads.
Ravi Thothadri -
This query is running slow... please help to improve performance
SELECT Listnontranstextid,
Listnontransshort,
Listnontransmedium,
Listnontransextended
FROM (WITH TEXT_T
AS (SELECT /*+ index(TT pk_text_translation) */TT.TEXTID,
TT.short,
TT.medium,
TT.extended
FROM TEXT_TRANSLATION TT
WHERE TT.Active = 1
AND ( TT.Short <> 'Null'
OR TT.Medium <> 'Null'
OR TT.Extended <> 'Null')
AND TT.Languageid = @Langid
FUNC AS (SELECT FN.ID
FROM Function_ Fn
INNER JOIN Function_Type Fnty
ON Fn.Functiontype = Fnty.Functiontype
AND Fnty.Active = 1
INNER JOIN Operation_Step_Function Osf
ON (Osf.Functionid = Fn.Id)
AND Osf.Active = 1
INNER JOIN Operation_Step Os
ON Os.Id = Osf.Operationstepid
AND Os.Active = 1
INNER JOIN Operation Op
ON Op.Id = Os.Operationid
AND op.defaultoperationrevision = 1
AND Op.Active = 1
AND Op.Revisionstatusid NOT IN (2)
-- 2 means Operation Staus =Cancelled
WHERE FN.ACTIVE = 1
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM function_translation ft
INNER JOIN TEXT_T TT
ON (TT.Textid = Ft.Textid)
INNER JOIN FUNC F
ON (F.ID = Ft.Functionid)
WHERE Ft.ACTIVE = 1
UNION
SELECT /*+ index(Forout IF_FUNCTION_OUTPUT_ROUTING_02) */ TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM Function_Output Fo
INNER JOIN Function_Output_Routing Forout
ON Forout.Functionoutputid = Fo.Id
INNER JOIN TEXT_T TT
ON TT.Textid = Forout.PromptTextid
INNER JOIN Function_Output_Routing_Type Fort
ON Fort.Id = Forout.Outputroutingtypeid
INNER JOIN Text_Translation Ttdt
ON Ttdt.Textid = Fort.Textid
AND Ttdt.Languageid = @Langid
AND UPPER (Ttdt.Extended) = ('USER')
INNER JOIN FUNC F
ON F.ID = FO.Functionid
UNION
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM Function_Input Fi
INNER JOIN TEXT_T TT
ON (TT.Textid = Fi.Prompttextid)
INNER JOIN Function_Input_Source_Type Fist
ON Fist.Id = Fi.Inputsourcetypeid AND Fist.Active = 1
INNER JOIN Text_Translation Ttdt
ON Ttdt.Textid = Fist.Textid
AND Ttdt.Active = 1
AND Ttdt.Languageid = @Langid
AND UPPER (Ttdt.Extended) = ('USER')
INNER JOIN FUNC F
ON F.ID = FI.Functionid
UNION
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM Function_Input_value Fiv
INNER JOIN function_input fi
ON fi.id = fiv.functioninputid
INNER JOIN TEXT_T TT
ON (TT.Textid = Fiv.textid)
INNER JOIN Function_Input_Source_Type Fist
ON Fist.Id = Fi.Inputsourcetypeid AND Fist.Active = 1
INNER JOIN Text_Translation Ttdt
ON Ttdt.Textid = Fist.Textid
AND Ttdt.Active = 1
AND Ttdt.Languageid = @Langid
AND UPPER (Ttdt.Extended) = ('USER')
INNER JOIN FUNC F
ON F.ID = FI.Functionid
UNION
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM cob_t_ngmes_master_data ctnmt
INNER JOIN
text_translation tt
ON tt.textid = ctnmt.textid
WHERE tt.languageid = @Langid
UNION -- Swanand, PR 190540, Added this clause to get the reasoncodes
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM Reason_Code RC
INNER JOIN Reason_Type RT
ON RT.ReasonType = RC.ReasonType
INNER JOIN TEXT_TRANSLATION TT1
ON TT1.textid = RT.textid
AND RT.ACTIVE = 1
AND TT1.ACTIVE = 1
AND ( TT1.Short <> 'Null'
OR TT1.Medium <> 'Null'
OR TT1.Extended <> 'Null')
INNER JOIN TEXT_T TT
ON TT.textid = RC.textid AND RC.ACTIVE = 1
WHERE TT1.Languageid = @Langid
UNION
SELECT TT.Textid AS Listnontranstextid,
TT.Short AS Listnontransshort,
TT.Medium AS Listnontransmedium,
TT.Extended AS Listnontransextended
FROM NSPT_T_Event_Type ET
INNER JOIN TEXT_TRANSLATION TT1 ON TT1.TextID =
ET.TextID AND TT1.ACTIVE = 1
INNER JOIN TEXT_T TT
ON TT.TextID = TT1.TextID
WHERE TT1.Languageid = @Langid
ORDER BY Listnontranstextid ASC) WHERE Listnontranstextid > @I_TextID

Edited by: 964145 on Oct 26, 2012 4:53 PM

Duplicate post? See: "Query running slow... need performance tuning"
Maybe you are looking for
-
Web Dynpro application callable object error: Page Builder Not available
Hi, SDN Fellow. I created a callable object of Web Dynpro application. The callable object is atatched to an Action, and Action --> Block, and Block --> Process. I initiate the process in GP Runtime, when it comes to the Action screen the Web Dynpro
-
File sender adapter not reading
Hi experts, We're getting an issue with the File sender channels in our system. None of the files we place in the source directory is read by our channels. These channels are NFS. We've already gave all permissions to SO user <sysid>ADM. The CC monit
-
I was migrating from my MacBook to a new iMac. Nothing showed up until I turned off the iMac. When I restarted the iMac everything I'd previously done was gone and the migrated MacBook desktop had taken over. How do I bring back the original iMac settin
-
Is it possible to create a video playlist and have the videos NOT show up in the TV Show/Movies/Music Video categories? I have video clips of various things and they aren't movies or tv shows. Is there a way around this? I've created a video playlist
-
I just picked up a new 120GB iPod Classic (after my trusty iPod Photo finally croaked) and there's a change that I wanted to query. I have a fair few playlists set up for various occasions - running,parties, etc. - and to keep those lists together, a