Performance problem (massive volume of IMAP getUnreadMessageCount requests)

Hello,
I'm working on a server that needs to fetch the unread message count of many IMAP users (up to 40 requests/second).
I'm facing performance issues: I can't get past ~20 requests/second.
I tried using multiple stores (and sessions), but it doesn't go any faster. The performance bottleneck seems to be the "connect" step.
I can provide the server configuration files if needed.
Maybe there's some performance tuning I'm not aware of that I could use, can you help?
Here is a very basic program that gets called far too often:
import java.util.Date;
import java.util.Properties;

import javax.mail.Folder;
import javax.mail.Session;

import com.sun.mail.imap.IMAPStore;

public class MessagesDao {

    /** Singleton instance. */
    private static MessagesDao instance = null;

    /**
     * Singleton accessor.
     * @return the shared instance
     */
    public static synchronized MessagesDao getInstance() {
        if (instance == null) {
            instance = new MessagesDao();
        }
        return instance;
    }

    private Session sessionImap;

    private MessagesDao() {
        final String portServeurImap = "143";
        final String adresseServeurImap = "my_server";
        final String imapConnectionPoolSize = "10";
        final String imapConnectionPoolTimeout = "10";
        final String imapTimeout = "2000";
        final String imapConnectionTimeout = "2000";

        final Properties propSessionImap = new Properties();
        propSessionImap.put("mail.store.protocol", "imap");
        propSessionImap.put("mail.imap.connectionpoolsize", imapConnectionPoolSize);
        propSessionImap.put("mail.imap.connectionpooltimeout", imapConnectionPoolTimeout);
        propSessionImap.put("mail.imap.timeout", imapTimeout);
        propSessionImap.put("mail.imap.connectiontimeout", imapConnectionTimeout);
        propSessionImap.put("mail.imap.port", portServeurImap);
        propSessionImap.put("mail.imap.host", adresseServeurImap);
        propSessionImap.put("mail.imap.auth", "true");
        propSessionImap.put("mail.imap.separatestoreconnection", "true");
        // Property values must be Strings; the original code passed the int -1 here.
        propSessionImap.put("mail.imap.appendbuffersize", "-1");
        propSessionImap.put("mail.debug", "false");
        this.sessionImap = Session.getInstance(propSessionImap);
    }

    /**
     * @param user e.g. someName@myDomain
     * @return the unread message count, or -1 on error
     */
    public int getUnreadMessageCount(String user) {
        IMAPStore store = null;
        try {
            long timeStart = new Date().getTime();
            store = (IMAPStore) this.sessionImap.getStore();
            long timeGetStore = new Date().getTime();
            store.connect(user, "fakepass");
            long timeConnect = new Date().getTime();
            Folder inbox = store.getFolder("inbox");
            // getUnreadMessageCount() works on a closed folder: it issues a STATUS command.
            int count = inbox.getUnreadMessageCount();
            System.out.println("-----> unread " + user + " : " + count
                    + " (getStore: " + (timeGetStore - timeStart)
                    + ", connect: " + (timeConnect - timeGetStore) + ")");
            return count;
        } catch (Exception e) {
            e.printStackTrace();
            return -1;
        } finally {
            try {
                if (store != null) {
                    store.close();
                }
            } catch (Exception e) {
                // ignore errors while closing
            }
        }
    }
}
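Since almost all of the per-call time is spent inside connect, the only client-side lever left is issuing the lookups in parallel. A minimal sketch, assuming the Dovecot server can actually process that many logins concurrently (the pool size of 25 and the user names are made up; if authentication is serialized server-side, this won't help):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class UnreadCountBatch {
    public static void main(String[] args) {
        // 25 workers is an arbitrary starting point; tune against the server.
        ExecutorService pool = Executors.newFixedThreadPool(25);
        List<String> users = Arrays.asList("alice@myDomain", "bob@myDomain"); // hypothetical users
        for (final String user : users) {
            pool.submit(new Runnable() {
                public void run() {
                    // Each call opens, uses and closes its own store connection.
                    MessagesDao.getInstance().getUnreadMessageCount(user);
                }
            });
        }
        pool.shutdown();
    }
}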

A single user never opens more than 2 folders at a time, so I guess I don't really need the 10-connection pool...
Here's what the traffic looks like:
"5388","16.536513","cli","serv","TCP","46434 > imap [SYN] Seq=0 Win=5840 Len=0 MSS=1460 TSV=58648306 TSER=0 WS=6"
"5389","16.537545","serv","cli","TCP","imap > 46434 [SYN, ACK] Seq=0 Ack=1 Win=5792 Len=0 MSS=1460 TSV=3608644264 TSER=58648306 WS=7"
"5390","16.537567","cli","serv","TCP","46434 > imap [ACK] Seq=1 Ack=1 Win=5888 Len=0 TSV=58648306 TSER=3608644264"
"5391","16.539219","serv","cli","IMAP","Response: * OK Dovecot ready."
"5392","16.539251","cli","serv","TCP","46434 > imap [ACK] Seq=1 Ack=22 Win=5888 Len=0 TSV=58648307 TSER=3608644266"
"5394","16.540835","cli","serv","IMAP","Request: A0 CAPABILITY"
"5397","16.542140","serv","cli","TCP","imap > 46434 [ACK] Seq=22 Ack=16 Win=5888 Len=0 TSV=3608644269 TSER=58648307"
"5398","16.542399","serv","cli","IMAP","Response: * CAPABILITY IMAP4rev1 SASL-IR SORT THREAD=REFERENCES MULTIAPPEND UNSELECT LITERAL+ IDLE CHILDREN NAMESPACE LOGIN-REFERRALS QUOTA STARTTLS AUTH=PLAIN"
"5399","16.542733","cli","serv","IMAP","Request: A1 AUTHENTICATE PLAIN"
"5415","16.584176","serv","cli","TCP","imap > 46434 [ACK] Seq=202 Ack=39 Win=5888 Len=0 TSV=3608644310 TSER=58648307"
"5421","16.594994","serv","cli","IMAP","Response: + "
"5427","16.595570","cli","serv","IMAP","Request: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA etc=="
"5433","16.596586","serv","cli","TCP","imap > 46434 [ACK] Seq=206 Ack=129 Win=5888 Len=0 TSV=3608644322 TSER=58648321"
"6420","18.341472","serv","cli","IMAP","Response: A1 OK Logged in."
"6421","18.341639","cli","serv","IMAP","Request: A2 CAPABILITY"
"6437","18.370916","serv","cli","TCP","imap > 46434 [ACK] Seq=224 Ack=144 Win=5888 Len=0 TSV=3608646052 TSER=58648757"
"6526","18.602063","serv","cli","IMAP","Response: * CAPABILITY IMAP4rev1 SASL-IR SORT THREAD=REFERENCES MULTIAPPEND UNSELECT LITERAL+ IDLE CHILDREN NAMESPACE LOGIN-REFERRALS QUOTA"
"6528","18.604615","cli","serv","IMAP","Request: A3 LIST "" inbox"
"6542","18.642682","serv","cli","TCP","imap > 46434 [ACK] Seq=384 Ack=162 Win=5888 Len=0 TSV=3608646316 TSER=58648823"
"6555","18.668482","serv","cli","IMAP","Response: * LIST (\HasNoChildren) "." "INBOX""
"6557","18.668945","cli","serv","IMAP","Request: A4 STATUS INBOX (MESSAGES RECENT UNSEEN UIDNEXT UIDVALIDITY)"
"6578","18.712320","serv","cli","IMAP","Response: * STATUS "INBOX" (MESSAGES 1 RECENT 0 UIDNEXT 10 UIDVALIDITY 1271690632 UNSEEN 1)"
"6584","18.713558","cli","serv","IMAP","Request: A5 LOGOUT"
"6588","18.728700","serv","cli","IMAP","Response: * BYE Logging out"
"6589","18.728774","serv","cli","IMAP","Response: A5 OK Logout completed."
"6590","18.728914","cli","serv","TCP","46434 > imap [ACK] Seq=235 Ack=597 Win=8000 Len=0 TSV=58648854 TSER=3608646398"
"6591","18.729118","cli","serv","TCP","46434 > imap [FIN, ACK] Seq=235 Ack=597 Win=8000 Len=0 TSV=58648854 TSER=3608646398"
Judging by the trace, the login dominates: the AUTHENTICATE PLAIN exchange starts at 16.59 s and the "A1 OK Logged in." response only arrives at 18.34 s, about 1.75 s later. So it seems to me that the problem is server-side, in the login part.

Similar Messages

  • Serious performance problems with IMAP

    Hi,
our GroupWise server runs on SLES10 SP4 and has about 2500 accounts in 7 post offices, one GWIA, one MTA and WebAccess.
When IMAP is activated on the GWIA, the CPU load goes to 99% after a couple of hours, making the server unusable. When I turn IMAP off, the CPU load is stable under 1%.
We have already limited the number of items per folder to 1000.
Where do I look? What do I do? I'm at my wits' end!
    Mike

On 1/20/2012 7:05 AM, Steven Eschenburg wrote:
    > We were having a similar issue with our 8.0.2 GWIA running under Windows
    > Server 2008 on a VM, and adding a second CPU cut the CPU utilization
    > down substantially.
    >
    >
    > >>> tgm its<[email protected]> 1/20/2012 7:36 AM >>>
    >
    > konecnya;2169415 Wrote:
    > > In article <[email protected]>, Tgm its wrote:
    > > > When IMAP is activated on the gwia, the cpu load goes to 99% after a
    > > > couple of hours, making the server unusable.
    > > >
    > > Is this just a single core single CPU system? If not make sure you are
    > > running the SMP kernel.
    > > uname -a
    > >
> Yes, it is a single-core, single-CPU system. It's a virtual machine on a
> SLES10 Xen host.
    > uname -a:
    > Linux gw1 2.6.16.60-0.85.1-xen #1 SMP Thu Mar 17 11:45:06 UTC 2011
    > x86_64 x86_64 x86_64 GNU/Linux
    >
    > konecnya;2169415 Wrote:
    > >
    > > confirm that it is actually GWIA using up all that CPU and not
    > > something else or a mix of things. Knowing what exactly is busy will
    > > help.
    > > top
    > >
    > > With 'top' if you have enough width to your terminal session, hitting
    > > 'c' will tell you which POA is the offender if one is being the
    > > problem.
    > >
    > Yes, I'm sure its gwia and not a postoffice. Gwia is using 99% of the
    > cpu time.
    >
    > konecnya;2169415 Wrote:
    > >
    > > As per Michael, do make sure maintenance has been run on all the Post
    > > Offices and do so from the top-down approach as per TID 3347260
    > > (formerly TID 10007365)
    > >
> > http://www.novell.com/support/dynamickc.do?cmd=show&forward=nonthreadedKC&docType=kc&externalId=3347260&sliceId=1
    > > If you don't have most of this automated, I wrote up the steps a while
    > > back that still does the trick, ideally not running them all at the
    > > same time on that system.
    > > 'GroupWise Maintenance' (http://www.konecnyad.ca/andyk/gwmnt5x.htm)
    >
    > Mike
    >
Yes, this would be my next step. You do have a fair number of users. Is
allocating another core an option?

  • CRM IC Webclient - Massive Performance Problems Searching in Agent Inbox

    Hi Forum,
can somebody help me? We have massive search performance problems in the Interaction Center Webclient Agent Inbox. When agents search, for example for open emails, getting a result takes approx. 2-3 minutes.
That's absolutely unacceptable and endangers the running business.
The query reading the emails from the workitem tables is very, very slow.
Could somebody help me with ideas to solve this big performance problem? Does anybody else have the same problem?
    Thank you very much in advance for any information.
    We use Interaction Center Webclient on CRM Release 5.0
    Thorsten

Another aspect you could check is the following:
The analysis performed didn't show anything strange, nor any big consumers of response time. The processing time occurs because IC-Web on CRM 5 is rather demanding on CPU power.
The one thing that could improve performance a bit is the buffering of the org structure, which is currently switched off for the SALES and SERVICE scenarios as per table T77OMATTSC.
The report HRBCI_ATTRIBUTES_BUFFER_UPDATE is running regularly, but no scenario is being placed in the buffer to speed up org structure reads. Please check this and provide feedback.
How to use this report can be found in the report's own documentation and also in SAP Help.
    cheers
    Davy

  • Messaging performance problem on RAID disk with many IMAP users

The performance problem can be solved by tuning the disk RAID system.


  • Performance problems when running PostgreSQL on ZFS and tomcat

    Hi all,
I need help with some analysis and a solution for the case below.
    The long story:
I'm running into some massive performance problems on two 8-way HP ProLiant DL385 G5 servers with 14 GB RAM and a ZFS storage pool in raidz configuration. The servers are running Solaris 10 x86 10/09.
The configuration of the two is pretty much the same, so the problem seems generic to the setup.
Within a non-global zone I'm running a Tomcat application (an institutional repository) connecting via localhost to a PostgreSQL database (the OS-provided version). The processor load is typically not very high, as seen below:
NPROC USERNAME  SWAP   RSS MEMORY      TIME  CPU
   49 postgres  749M  669M   4,7%   7:14:38  13%
    1 jboss    2519M 2536M    18%  50:36:40 5,9%
We are not 100% sure why we run into performance problems, but when it happens the application slows down and swaps out (see below). When it settles, everything seems to return to normal. When the problem is acute, the application is totally unresponsive.
NPROC USERNAME  SWAP   RSS MEMORY      TIME  CPU
    1 jboss    3104M  913M   6,4%   0:22:48 0,1%
    #sar -g 5 5
    SunOS vbn-back 5.10 Generic_142901-03 i86pc    05/28/2010
    07:49:08  pgout/s ppgout/s pgfree/s pgscan/s %ufs_ipf
    07:49:13    27.67   316.01   318.58 14854.15     0.00
    07:49:18    61.58   664.75   668.51 43377.43     0.00
    07:49:23   122.02  1214.09  1222.22 32618.65     0.00
    07:49:28   121.19  1052.28  1065.94  5000.59     0.00
    07:49:33    54.37   572.82   583.33  2553.77     0.00
Average     77.34   763.71   771.43 19680.67     0.00
Making more memory available to Tomcat seemed to worsen the problem, or at least didn't prove to have any positive effect.
My suspicion is currently focused on PostgreSQL. Turning off fsync boosted performance and made the problem appear less often.
An unofficial performance evaluation on the database with "vacuum analyze" took 19 minutes on the server and only 1 minute on a desktop PC. This is horrific considering the hardware.
The short story:
I'm trying different steps but running out of ideas. We've read that the database block size and file system block size should match. PostgreSQL uses 8 KB and ZFS 128 KB. I didn't find much information on the matter, so if anyone can help, please recommend how to make this change…
Any other recommendations or ideas we could follow? We know from other installations that the above setup runs without a single problem on Linux on much smaller hardware without specific tuning. What makes Solaris in this configuration so darn slow?
    Any help appreciated and I will try to provide additional information on request if needed…
    Thanks in advance,
    Kasper

raidz isn't a good match for databases. Databases tend to require good write performance, for which mirroring works better.
Adding a pair of SSDs as a ZIL would probably also help, but chances are it's not an option for you..
You can change the record size with "zfs set recordsize=8k <dataset>".
It will only take effect for newly written data, not existing data.
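Because the new recordsize only applies to newly written blocks, one way to get the whole cluster onto 8 KB records is to create a fresh dataset and copy the database files into it; a rough sketch (pool and dataset names are made up):

# create a dedicated dataset with 8 KB records for the PostgreSQL data
zfs create -o recordsize=8k tank/pgdata-8k
# stop PostgreSQL, copy the data directory onto the new dataset,
# then point the server at the new location before restarting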

  • Jdeveloper dual core processor performance problem

I have a dual-core 2.4 GHz processor and 2 GB of RAM, and I'm running JDeveloper 9.0.5.2 with terrible performance. Other developers in the company have non-dual-core processors and can start our application in debug mode, using the JDeveloper embedded OC4J, in 10 seconds, where it takes me 4 minutes! It is a Struts/EJB web application. Is there anything I can do to help in debug mode? Cheers.
    Murray

    Hi Bernard,
    Which version of McAfee are you using?
On my (personal) laptop, I'm using McAfee VirusScan 9.0.10. I don't frequently run JDeveloper on this laptop, but when I do, it doesn't experience significant startup delays (it's a very low-power machine: PIII 650, 512 MB).
McAfee VirusScan seems to have very few configuration options (noticeably different from Norton, which I use on my corporate desktop machine). I specifically remember changing the "File Types to Scan" option to "Program files and documents only". You can get to this by right-clicking the "M" notification area icon, then the VirusScan->Options menu, Advanced button on the ActiveShield page.
In Norton, I think I have it configured so that it only scans files on write rather than on read. I also exclude directories which contain JDeveloper installs or other large Java apps (although scanning only on write eliminates most of the performance problems anyway and still leaves your system reasonably secure).
The easiest way to convince your MIS dept that the virus checker is the source of your problems might be to ask them to allow you to turn it off in order to test the difference it makes to performance. It's a reasonable request if you're trying to eliminate possible causes of the slowdown (from the description you gave, the AV upgrade does sound like the first place I'd start looking).
    If the virus checker is the source of your problems, you'll probably be seeing massive slowness in most large Java applications that have a large number of JARs on their classpath.
    Thanks,
    Brian

  • Performance problem - XML and Hashtable

    Hello!
I have a strange (or maybe not so strange, we don't know) performance problem. The situation is simple: our application runs on Oracle iAS, gets XML messages from an MQ, and we have to process them.
    Sample message:
    <RequestDeal RequestId="RequestDeal-20090209200010-998493885" CurrentFragment="0" EndFragment="51" Expires="2009-03-7T08:00:10">
         <Deal>
              <Id>385011</Id>
              <ModifyDate>2009-02-09</ModifyDate>
              <PastDuesOnly>false</PastDuesOnly>
         </Deal>
         <Deal>
              <Id>385015</Id>
              <ModifyDate>2009-02-09</ModifyDate>
              <PastDuesOnly>false</PastDuesOnly>
         </Deal>
     </RequestDeal>
     (there's an average of 50,000-80,000 deals in the whole message)
    After the application receives the whole message, we'd like to parse it, and put the values into a hashtable for further processing:
    Hashtable ht = new Hashtable();
    Node eDeals = getElementNode(docReq, "/RequestDeal");
    Node eDeal = (Element) (eDeals.getFirstChild());
    long start = System.currentTimeMillis();
    while (eDeal != null) {
      String id = getElementText((Element) eDeal, "Id");
      Date modifyDate = getElementDateValue((Element) eDeal, "ModifyDate");
      boolean szukitett = getElementBool((Element) eDeal, "PastDuesOnly", false);
      ht.put(id, new UgyletStatusz(id, modifyDate, szukitett));
      eDeal = eDeal.getNextSibling();
    }
    logger.info("Request adatok betöltve."); // "Request data loaded."
    long end = System.currentTimeMillis();
    logger.info("Hosszu muvelet ideje: " + (end - start) + "ms"); // "Duration of the long operation: ... ms"
The problem is the while statement. On my PC it runs for 15-25 seconds, depending on the number of deals; but on our customer's server it runs for 2-5 hours with very high CPU load. The application uses the Oracle XML parser.
On my PC there are WebSphere MQ 5.3 and Oracle 10g iAS with a 1.5 JVM.
On our customer's server there are WebSphere MQ 5.3 and Oracle 9iAS with a 1.4 HotSpot JVM.
    Do you have any idea, what can be the cause of the problem?

    gyulaur wrote:
    Hello!
The problem is the while statement. On my PC it runs for 15-25 seconds, depending on the number of deals; but on our customer's server it runs for 2-5 hours with very high CPU load. The application uses the Oracle XML parser.
On my PC there are WebSphere MQ 5.3 and Oracle 10g iAS with a 1.5 JVM.
On our customer's server there are WebSphere MQ 5.3 and Oracle 9iAS with a 1.4 HotSpot JVM.
Do you have any idea what can be the cause of the problem?
All sorts of things are possible.
- MQ is configured differently, for instance transactional versus non-transactional.
- The customer uses a network (multiple computers) while you use a single box, and something is using a lot of bandwidth on the network. It could be your process, one of the dependent processes, or something else that has nothing to do with your system.
- The processing computer is loaded with something else, or something is artificially limiting the processing time of your application (there is an app that allows one to limit another app to one CPU).
- At least one version of 1.4 had an XML bug that consumed massive amounts of memory when processing large XML. Sound familiar? If the physical memory is not up to it, then it will be thrashing the hard drive as it swaps virtual memory in and out.
- Of course, virtual memory causing swaps would impact it regardless of the cause.
- The database is loaded.
You can of course at least get the same version of Java that they are using. Actually, that would seem like a rather good idea to me regardless.
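Beyond that, if the DOM walk itself is part of the cost (each getElement* helper may re-scan the children of the deal node), a streaming parser avoids building and traversing the tree entirely. A minimal sketch using SAX, which is available on a 1.4 JVM; the element names come from the sample message above, and the placeholder String array stands in for the poster's UgyletStatusz:

import java.util.Hashtable;
import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.Attributes;
import org.xml.sax.helpers.DefaultHandler;

public class DealHandler extends DefaultHandler {
    private final Hashtable deals = new Hashtable();
    private final StringBuffer text = new StringBuffer();
    private String id, modifyDate, pastDuesOnly;

    public void startElement(String uri, String local, String qName, Attributes atts) {
        text.setLength(0); // start collecting character data for this element
    }

    public void characters(char[] ch, int start, int length) {
        text.append(ch, start, length);
    }

    public void endElement(String uri, String local, String qName) {
        if ("Id".equals(qName)) id = text.toString();
        else if ("ModifyDate".equals(qName)) modifyDate = text.toString();
        else if ("PastDuesOnly".equals(qName)) pastDuesOnly = text.toString();
        else if ("Deal".equals(qName)) {
            // real code would build UgyletStatusz(id, date, flag) here instead
            deals.put(id, new String[] { id, modifyDate, pastDuesOnly });
        }
    }

    public Hashtable getDeals() { return deals; }

    public static void main(String[] args) throws Exception {
        DealHandler handler = new DealHandler();
        SAXParserFactory.newInstance().newSAXParser().parse(new java.io.File(args[0]), handler);
        System.out.println(handler.getDeals().size() + " deals loaded");
    }
}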

  • Problem in sending HTTP request to the server.

    Hi,
I developed an Ant script for SAR deployment.
I deployed a SAR to my local SOA server with the Ant script, and it deployed successfully,
but when I try to deploy to a remote server, I get the error below:
"Problem in sending HTTP request to the server. Please make sure the server is up and/or check standard HTTP response code for 404"
But the server is up and running; I am able to ping it from my machine and also access the console.
Below is my script:
    build.properties
    wn.bea.home=C:/Oracle/Middleware
    all.needed.jars.path=D:/SourceCode/neededJAR
    oracle.soa.home=C:/Oracle/Middleware/Oracle_SOA1
    java.passed.home=C:/Oracle/Middleware/jdk160_24
    #Deployment environment
    deployment.plan.environment=DEV
    #Deploy Action
    deployAction =redeploy
    #credentials
    user=weblogic
    password=welcome1
    #For Composite deployment
    serverURL=http://10.177.154.6:7001
    forceDefault=true
    server=10.177.154.6
    port=7001
    sarLocation=D:/SourceCode/JAR
    build.xml
    <?xml version="1.0" encoding="iso-8859-1"?>
    <project name="soaDeployAll" default="deployAll">
         <echo>basedir ${basedir}</echo>
         <property environment="env"/>
    <echo>current folder ${basedir}</echo>
         <property file="${basedir}/build.properties"/>
         <taskdef resource="net/sf/antcontrib/antlib.xml">
         <classpath>
              <pathelement location="${all.needed.jars.path}/ant-contrib.jar"/>           
         </classpath>
         </taskdef>
         <target name="init">
              <tstamp>
                   <format property="timestamp" pattern="yyyy-MM-dd_HH-mm-ss"/>
              </tstamp>
              <property name="build.log.dir" location="${basedir}/buildlogs"/>
              <mkdir dir="${build.log.dir}"/> <property name="build.log.filename" value="build_${timestamp}.log"/>
              <record name="${build.log.dir}/${build.log.filename}" loglevel="verbose" append="false"/>
              <echo message="Build logged to ${build.log.filename}"/>
         </target>
         <target name="deployAll" depends="init">
         <echo>Deploy for environment ${deployment.plan.environment}</echo>
         <antcall target="deployAllComposites"/>
    </target>
    <!-- Following Actions are performed for Composite files in Managed Server - Deploy,Redeploy -->
         <target name="deployAllComposites" depends="init">
         <foreach target="deployComposites" param="Files">
              <fileset dir="${sarLocation}" casesensitive="no" includes="*.jar"/>
         </foreach>
         </target>
         <target name="deployComposites" depends="init">
         <basename file="${Files}" property="basename"/>
    <echo>Deploy Project ${basename} for environment ${deployment.plan.environment}</echo>
              <if>
                   <equals arg1="${deployAction}" arg2="deploy" />
                   <then>
                        <echo message="Deploying composites in Managed server........." />
                        <ant antfile="${oracle.soa.home}/bin/ant-sca-deploy.xml" inheritAll="true" target="deploy">
                             <property name="serverURL" value="${serverURL}"/>
                             <property name="user" value="${user}"/>
                             <property name="password" value="${password}"/>
                             <property name="overwrite" value="false"/>
                             <property name="forceDefault" value="${forceDefault}"/>
                             <property name="sarLocation" value="${sarLocation}/${basename}"/>
                        </ant>
                   </then>
                   <else>
                        <echo message="ReDeploying composites in Managed server........." />
                        <ant antfile="${oracle.soa.home}/bin/ant-sca-deploy.xml" inheritAll="true" target="deploy">
                             <property name="serverURL" value="${serverURL}"/>
                             <property name="user" value="${user}"/>
                             <property name="password" value="${password}"/>
                             <property name="overwrite" value="true"/>
                             <property name="forceDefault" value="${forceDefault}"/>
                             <property name="sarLocation" value="${sarLocation}/${basename}"/>                         
                        </ant>
                   </else>
              </if>
    </target>
    </project>
    please help....

    Hi,
Give the serverURL as http://<host>:<managed.server.port>/soa-infra/deployer and try,
e.g. http://10.177.154.6:8001/soa-infra/deployer
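Applied to the build.properties above, that means changing the serverURL property like this (assuming 8001 is the managed server's port, as in the example):
serverURL=http://10.177.154.6:8001/soa-infra/deployer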
    Regards,
    Neeraj Sehgal

  • JRC 2: Performance Problem

    Hi.
Our reporting component used JRC 1.x before we upgraded to JRC 2.x. We hit two issues after upgrading.
The first issue I already solved with a workaround, which I published on stackoverflow.com (1). Does anyone know where I can find the issue management system to report this issue?
The second issue is a big performance problem in our project. Using JRC 1, we opened a report with 6 subreports (each containing 1 to 3 tables) in 2-4 seconds. If we open the same report using JRC 2, we wait up to 60 seconds.
These methods require more time with JRC 2 compared to JRC 1:
ReportClientDocument#open(String, int);
SubreportController#setTableLocation(String, ITable, ITable)
DatabaseController#setTableLocation(ITable, ITable)
Each invocation of one of these methods takes 2-4 seconds.
    Thank you in advance.
    Best regards
    Thomas
    (1) http://stackoverflow.com/questions/479405/replace-a-database-connection-for-subreports-with-jrc

hello ....
my report is "Crystal Reports 11" => "OLE DB" => "Add Command (select * from table)".
code (JRC): Eclipse + Crystal Reports for Eclipse version 2 => "cr4e-all-in-one-win_2.0.1.zip"
<%@ page contentType="text/html; charset=UTF-8"
import="
com.crystaldecisions.report.web.viewer.CrystalReportViewer,
com.crystaldecisions.reports.sdk.ReportClientDocument,
com.crystaldecisions.sdk.occa.report.lib.ReportSDKExceptionBase,
java.sql.Connection,
java.sql.DriverManager,
java.sql.ResultSet,
java.sql.SQLException,
java.sql.Statement" %>
<%
     try {
          String reportName = "report.rpt";
          ReportClientDocument clientDoc = new ReportClientDocument();
          clientDoc.open(reportName, 0);
          String tableAlias = "Command";
          clientDoc.getDatabaseController().setDataSource(myResult("SELECT * FROM table"), tableAlias, tableAlias);
          CrystalReportViewer crystalReportPageViewer = new CrystalReportViewer();
          crystalReportPageViewer.setReportSource(clientDoc.getReportSource());
          crystalReportPageViewer.processHttpRequest(request, response, application, null);
     } catch (ReportSDKExceptionBase e) {
          e.printStackTrace();
          out.println(e);
     }
%>
I simplified the code; myResult("SELECT * FROM table") is absolutely not the problem,
and this code runs with absolutely no problem in "Crystal Reports for Eclipse" version 1,
but in version 2 it fails with this error:
com.crystaldecisions.sdk.occa.report.lib.ReportSDKException: 無法預期的資料庫連線器錯誤 (unexpected database connector error) ---- Error code:-2147467259 Error code name:failed
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.a(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.a(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.if(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.a(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter$2.a(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter$2.call(Unknown Source)
         at com.crystaldecisions.reports.common.ThreadGuard.syncExecute(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.for(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.int(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.request(Unknown Source)
         at com.businessobjects.sdk.erom.jrc.a.a(Unknown Source)
         at com.businessobjects.sdk.erom.jrc.a.execute(Unknown Source)
         at com.crystaldecisions.proxy.remoteagent.RemoteAgent$a.execute(Unknown Source)
         at com.crystaldecisions.proxy.remoteagent.CommunicationChannel.a(Unknown Source)
         at com.crystaldecisions.proxy.remoteagent.RemoteAgent.a(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.ReportClientDocument.if(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.ReportClientDocument.a(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.ReportClientDocument.new(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.b9.onDataSourceChanged(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.DatabaseController.a(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.DatabaseController.a(Unknown Source)
         at com.crystaldecisions.sdk.occa.report.application.DatabaseController.setDataSource(Unknown Source)
         at org.apache.jsp.No_005f1.Eclipse_005fJTDS_005fSQL2005_005fTable_002dviewer_jsp._jspService(Eclipse_005fJTDS_005fSQL2005_005fTable_002dviewer_jsp.java:106)
         at org.apache.jasper.runtime.HttpJspBase.service(HttpJspBase.java:70)
         at javax.servlet.http.HttpServlet.service(HttpServlet.java:717)
         at org.apache.jasper.servlet.JspServletWrapper.service(JspServletWrapper.java:374)
         at org.apache.jasper.servlet.JspServlet.serviceJspFile(JspServlet.java:342)
         at org.apache.jasper.servlet.JspServlet.service(JspServlet.java:267)
         at javax.servlet.http.HttpServlet.service(HttpServlet.java:717)
         at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:290)
         at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:206)
         at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:233)
         at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:191)
         at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:128)
         at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:102)
         at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:109)
         at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:286)
         at org.apache.coyote.http11.Http11Processor.process(Http11Processor.java:845)
         at org.apache.coyote.http11.Http11Protocol$Http11ConnectionHandler.process(Http11Protocol.java:583)
         at org.apache.tomcat.util.net.JIoEndpoint$Worker.run(JIoEndpoint.java:447)
         at java.lang.Thread.run(Unknown Source)
Caused by: com.crystaldecisions.reports.common.QueryEngineException: 無法預期的資料庫連線器錯誤 (unexpected database connector error)
         at com.crystaldecisions.reports.queryengine.Connection.bf(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Rowset.z3(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Rowset.bL(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Rowset.zM(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Connection.a(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Table.a(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Table.if(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Table.try(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Table.a(Unknown Source)
         at com.crystaldecisions.reports.queryengine.Table.u7(Unknown Source)
         at com.crystaldecisions.reports.datafoundation.DataFoundation.a(Unknown Source)
         at com.crystaldecisions.reports.dataengine.dfadapter.DFAdapter.a(Unknown Source)
         at com.crystaldecisions.reports.dataengine.dfadapter.CheckDatabaseHelper.a(Unknown Source)
         at com.crystaldecisions.reports.dataengine.datafoundation.CheckDatabaseCommand.new(Unknown Source)
         at com.crystaldecisions.reports.common.CommandManager.a(Unknown Source)
         at com.crystaldecisions.reports.common.Document.a(Unknown Source)
         at com.crystaldecisions.reports.dataengine.VerifyDatabaseCommand.new(Unknown Source)
         at com.crystaldecisions.reports.common.CommandManager.a(Unknown Source)
         at com.crystaldecisions.reports.common.Document.a(Unknown Source)
         at com.businessobjects.reports.sdk.requesthandler.f.a(Unknown Source)
         at com.businessobjects.reports.sdk.requesthandler.DatabaseRequestHandler.a(Unknown Source)
         at com.businessobjects.reports.sdk.requesthandler.DatabaseRequestHandler.if(Unknown Source)
         at com.businessobjects.reports.sdk.JRCCommunicationAdapter.do(Unknown Source)
         ... 39 more
    Please help me and tell me why....

  • Performance issue on 1 SQL request

    Hi,
We have a performance problem. We have 2 systems, PRD and QAS (QAS is a copy of PRD as of September 2nd).
The SQL request is identical.
Table structures are identical.
Indexes are identical.
Views are identical.
DB stats have all been recalculated on both systems.
initSID.ora values are almost identical; only memory-related parameters (and the SID) are different.
Obviously, the data is different.
For your info, the view ZBW_VIEW_EKPO fetches its data from tables EIKP, LFA1, EKKO and EKPO.
Starting on September 15th, a query that used to take 10 minutes started taking over 120 minutes.
I compared explain plans on both systems and they are really different:
    SQL request:
    SELECT
      "MANDT" , "EBELN" , "EBELP" , "SAISO" , "SAISJ" , "AEDAT" , "AUREL" , "LOEKZ" , "INCO2" ,
      "ZZTRANSPORT" , "PRODA" , "ZZPRDHA" , "ZZMEM_DATE" , "KDATE" , "ZZHERKL" , "KNUMV" , "KTOKK"
    FROM
      "ZBW_VIEW_EKPO"
    WHERE
      "MANDT" = :A0#
    Explain plan for PRD:
    SELECT STATEMENT ( Estimated Costs = 300,452 , Estimated #Rows = 0 )
            8 HASH JOIN
              ( Estim. Costs = 300,451 , Estim. #Rows = 4,592,525 )
              Estim. CPU-Costs = 9,619,870,571 Estim. IO-Costs = 299,921
              Access Predicates
                1 TABLE ACCESS FULL EIKP
                  ( Estim. Costs = 353 , Estim. #Rows = 54,830 )
                  Estim. CPU-Costs = 49,504,995 Estim. IO-Costs = 350
                  Filter Predicates
                7 HASH JOIN
                  ( Estim. Costs = 300,072 , Estim. #Rows = 4,592,525 )
                  Estim. CPU-Costs = 9,093,820,218 Estim. IO-Costs = 299,571
                  Access Predicates
                    2 TABLE ACCESS FULL LFA1
                      ( Estim. Costs = 63 , Estim. #Rows = 812 )
                      Estim. CPU-Costs = 7,478,316 Estim. IO-Costs = 63
                      Filter Predicates
                    6 HASH JOIN
                      ( Estim. Costs = 299,983 , Estim. #Rows = 4,592,525 )
                      Estim. CPU-Costs = 8,617,899,244 Estim. IO-Costs = 299,508
                      Access Predicates
                        3 TABLE ACCESS FULL EKKO
                          ( Estim. Costs = 2,209 , Estim. #Rows = 271,200 )
                          Estim. CPU-Costs = 561,938,609 Estim. IO-Costs = 2,178
                          Filter Predicates
                        5 TABLE ACCESS BY INDEX ROWID EKPO
                          ( Estim. Costs = 290,522 , Estim. #Rows = 4,592,525 )
                          Estim. CPU-Costs = 6,913,020,784 Estim. IO-Costs = 290,141
                            4 INDEX SKIP SCAN EKPO~Z02
                              ( Estim. Costs = 5,144 , Estim. #Rows = 4,592,525 )
                              Search Columns: 2
                              Estim. CPU-Costs = 789,224,817 Estim. IO-Costs = 5,101
                             Access Predicates Filter Predicates
    Explain plan for QAS:
    SELECT STATEMENT ( Estimated Costs = 263,249 , Estimated #Rows = 13,842,540 )
            7 HASH JOIN
              ( Estim. Costs = 263,249 , Estim. #Rows = 13,842,540 )
              Estim. CPU-Costs = 59,041,893,935 Estim. IO-Costs = 260,190
              Access Predicates
                1 TABLE ACCESS FULL LFA1
                  ( Estim. Costs = 63 , Estim. #Rows = 812 )
                  Estim. CPU-Costs = 7,478,316 Estim. IO-Costs = 63
                  Filter Predicates
                6 HASH JOIN
                  ( Estim. Costs = 263,113 , Estim. #Rows = 13,842,540 )
                  Estim. CPU-Costs = 57,640,387,953 Estim. IO-Costs = 260,127
                  Access Predicates
                    4 HASH JOIN
                      ( Estim. Costs = 2,127 , Estim. #Rows = 194,660 )
                      Estim. CPU-Costs = 513,706,489 Estim. IO-Costs = 2,100
                      Access Predicates
                        2 TABLE ACCESS FULL EIKP
                          ( Estim. Costs = 351 , Estim. #Rows = 54,830 )
                          Estim. CPU-Costs = 49,504,995 Estim. IO-Costs = 348
                          Filter Predicates
                        3 TABLE ACCESS FULL EKKO
                          ( Estim. Costs = 1,534 , Estim. #Rows = 194,660 )
                          Estim. CPU-Costs = 401,526,622 Estim. IO-Costs = 1,513
                          Filter Predicates
                    5 TABLE ACCESS FULL EKPO
                      ( Estim. Costs = 255,339 , Estim. #Rows = 3,631,800 )
                      Estim. CPU-Costs = 55,204,047,516 Estim. IO-Costs = 252,479
                      Filter Predicates
One more bit of information: PRD was copied to TST about a month ago, and that system is also slow.
I have tried almost everything I could think of.

    > DB stats have all been recalculated on both systems
    > initSID.ora values are almost identical. only memory related parameters (and SID) are different.
    > Obviously, data is different
    Ok, so you say: the parameters are different, the data is different and the statistics are different.
    I'm surprised that you still expect the plans to be the same...
    > For you info, view ZBW_VIEW_EKPO fetched its info from tables EIKP, LFA1, EKKO and EKPO.
    We will need to see the view definition !
    > Starting on September 15th, a query that used to take 10 minutes started taking over 120 minutes.
    Oh - Sep. 15th - that explains it ... just kiddin'.
    Ok, so it appears to be obvious that from that day on, the execution plan for the query was changed.
    If you're on Oracle 10g you may look it up again and also recall the CBO stats that had been used back then.
    > I compared explain plans on both system and they are really different:
    >
    > SQL request:
    >
    SELECT
    >   "MANDT" , "EBELN" , "EBELP" , "SAISO" , "SAISJ" , "AEDAT" , "AUREL" , "LOEKZ" , "INCO2" ,
    >   "ZZTRANSPORT" , "PRODA" , "ZZPRDHA" , "ZZMEM_DATE" , "KDATE" , "ZZHERKL" , "KNUMV" , "KTOKK"
    > FROM
    >   "ZBW_VIEW_EKPO"
    > WHERE
    >   "MANDT" = :A0#
    Ok - basically you fetch all rows from this view as MANDT is usually not a selection criteria at all.
    > Explain plan for PRD:

    SELECT STATEMENT ( Estimated Costs = 300,452 , Estimated #Rows = 0 )
    >
    >         8 HASH JOIN
    >           ( Estim. Costs = 300,451 , Estim. #Rows = 4,592,525 )
    >           Estim. CPU-Costs = 9,619,870,571 Estim. IO-Costs = 299,921
    >           Access Predicates
    >
    >             1 TABLE ACCESS FULL EIKP
    >               ( Estim. Costs = 353 , Estim. #Rows = 54,830 )
    >               Estim. CPU-Costs = 49,504,995 Estim. IO-Costs = 350
    >               Filter Predicates
    >             7 HASH JOIN
    >               ( Estim. Costs = 300,072 , Estim. #Rows = 4,592,525 )
    >               Estim. CPU-Costs = 9,093,820,218 Estim. IO-Costs = 299,571
    >               Access Predicates
    >
    >                 2 TABLE ACCESS FULL LFA1
    >                   ( Estim. Costs = 63 , Estim. #Rows = 812 )
    >                   Estim. CPU-Costs = 7,478,316 Estim. IO-Costs = 63
    >                   Filter Predicates
    >                 6 HASH JOIN
    >                   ( Estim. Costs = 299,983 , Estim. #Rows = 4,592,525 )
    >                   Estim. CPU-Costs = 8,617,899,244 Estim. IO-Costs = 299,508
    >                   Access Predicates
    >
    >                     3 TABLE ACCESS FULL EKKO
    >                       ( Estim. Costs = 2,209 , Estim. #Rows = 271,200 )
    >                       Estim. CPU-Costs = 561,938,609 Estim. IO-Costs = 2,178
    >                       Filter Predicates
    >                     5 TABLE ACCESS BY INDEX ROWID EKPO
    >                       ( Estim. Costs = 290,522 , Estim. #Rows = 4,592,525 )
    >                       Estim. CPU-Costs = 6,913,020,784 Estim. IO-Costs = 290,141
    >
    >                         4 INDEX SKIP SCAN EKPO~Z02
    >                           ( Estim. Costs = 5,144 , Estim. #Rows = 4,592,525 )
    >                           Search Columns: 2
    >                           Estim. CPU-Costs = 789,224,817 Estim. IO-Costs = 5,101
    >                          Access Predicates Filter Predicates
    Ok, we've no restriction to the data, so Oracle chooses the access methods it thinks are best for large volumes of data - Full table scans and HASH JOINS. The index skip scan is quite odd - maybe this is due to one of the join conditions.
    > Explain plan for QAS:

    SELECT STATEMENT ( Estimated Costs = 263,249 , Estimated #Rows = 13,842,540 )
    >
    >         7 HASH JOIN
    >           ( Estim. Costs = 263,249 , Estim. #Rows = 13,842,540 )
    >           Estim. CPU-Costs = 59,041,893,935 Estim. IO-Costs = 260,190
    >           Access Predicates
    >
    >             1 TABLE ACCESS FULL LFA1
    >               ( Estim. Costs = 63 , Estim. #Rows = 812 )
    >               Estim. CPU-Costs = 7,478,316 Estim. IO-Costs = 63
    >               Filter Predicates
    >             6 HASH JOIN
    >               ( Estim. Costs = 263,113 , Estim. #Rows = 13,842,540 )
    >               Estim. CPU-Costs = 57,640,387,953 Estim. IO-Costs = 260,127
    >               Access Predicates
    >
    >                 4 HASH JOIN
    >                   ( Estim. Costs = 2,127 , Estim. #Rows = 194,660 )
    >                   Estim. CPU-Costs = 513,706,489 Estim. IO-Costs = 2,100
    >                   Access Predicates
    >
    >                     2 TABLE ACCESS FULL EIKP
    >                       ( Estim. Costs = 351 , Estim. #Rows = 54,830 )
    >                       Estim. CPU-Costs = 49,504,995 Estim. IO-Costs = 348
    >                       Filter Predicates
    >                     3 TABLE ACCESS FULL EKKO
    >                       ( Estim. Costs = 1,534 , Estim. #Rows = 194,660 )
    >                       Estim. CPU-Costs = 401,526,622 Estim. IO-Costs = 1,513
    >                       Filter Predicates
    >
    >                 5 TABLE ACCESS FULL EKPO
    >                   ( Estim. Costs = 255,339 , Estim. #Rows = 3,631,800 )
    >                   Estim. CPU-Costs = 55,204,047,516 Estim. IO-Costs = 252,479
    >                   Filter Predicates
    Ok, we see significantly different table sizes here, but at least this second plan leaves out the superfluous Index Skip Scan.
    How to move on from here?
1. Check whether you've installed all the current patches. Not all bugs in the system are hit all the time, so it may very well be that after new CBO stats were calculated you just began to hit one of them.
    2. Make sure that all parameter recommendations are implemented on the systems. This is crucial for the CBO.
    3. Provide a description of the Indexes and the view definition.
    The easiest would be: perform an Oracle CBO trace and provide a download link to it.
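For reference, one common way to produce such a CBO trace is optimizer event 10053 (standard Oracle syntax; run the statement in the same session while the event is active, and the trace file appears in user_dump_dest):
ALTER SESSION SET EVENTS '10053 trace name context forever, level 1';
-- execute the problematic SELECT here so its optimization gets traced
ALTER SESSION SET EVENTS '10053 trace name context off';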
    regards,
    Lars

  • Performance Problems - CPU

    Hi all,
    I'm having some performance problems and i have generated an AWR of a day and i have seen this following things:
Top 5 Timed Events
Event                     Waits      Time (s)  Avg Wait (ms)  % Total Call Time  Wait Class
CPU time                              50,318                  41.7
db file sequential read   6,688,472   32,711    5             27.1               User I/O
Backup: sbtwrite2         1,068,309    7,903    7              6.6               Administrative
db file scattered read    1,012,065    6,999    7              5.8               User I/O
PX Deq Credit: send blkd    231,401    4,989   22              4.1               Other
    Operating System Statistics DB/Inst: CAPDB14P/capdb14p1 Snaps: 15710-15778
    Statistic Total
    AVG_BUSY_TIME 3,221,704
    AVG_IDLE_TIME 4,923,831
    AVG_IOWAIT_TIME 2,302,776
    AVG_SYS_TIME 537,429
    AVG_USER_TIME 2,682,900
    BUSY_TIME 6,446,121
    IDLE_TIME 9,850,381
    IOWAIT_TIME 4,608,322
    SYS_TIME 1,077,598
    USER_TIME 5,368,523
    LOAD 0
    OS_CPU_WAIT_TIME 1,999,898,469,700
    RSRC_MGR_CPU_WAIT_TIME 0
    VM_IN_BYTES 12,201,893,888
    VM_OUT_BYTES 476,655,616
    PHYSICAL_MEMORY_BYTES 8,568,512,512
    NUM_CPUS 2
    NUM_CPU_SOCKETS 2
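(For scale: BUSY_TIME / (BUSY_TIME + IDLE_TIME) = 6,446,121 / 16,296,502 ≈ 40% average CPU utilization over the snapshot interval, while IOWAIT_TIME is nearly three quarters of BUSY_TIME.)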
I think we are having CPU problems here!
All my memory caches are good, with a 99% hit ratio.
Does anybody agree with me?
    Tks,
    Paulo

I have problems with some queries that have another wait event, related to RAC.
"gc cr multi block request" is taking a lot of time on some queries. These queries run very fast on another database that isn't a RAC database.
    Example:
1 - The tables have the same number of rows.
2 - Both tables and indexes are analyzed using the same tool (DBMS_STATS).
    ####RAC DATABASE####
    SELECT 1 from dual
    WHERE NOT EXISTS (SELECT 1
    FROM mensalidade a
    WHERE data_vencimento >= CHAR_TO_DATE('20070201'));
    ----Explain
    SELECT STATEMENT, GOAL = ALL_ROWS               4     1     
    FILTER                         
    FAST DUAL               2     1     
    PX COORDINATOR FORCED SERIAL                         
    PX SEND QC (RANDOM)     SYS     :TQ10000     2     1     7
    PX BLOCK ITERATOR               2     1     7
    INDEX FAST FULL SCAN     BRCAPDB2     IMENSALIDADE1     2     1     7
    ----It takes more than 500 seconds to run
    ####STANDALONE DATABASE####
    SELECT 1 from dual
    WHERE NOT EXISTS (SELECT 1
    FROM mensalidade a
    WHERE data_vencimento >= CHAR_TO_DATE('20070201'));
    ----Explain
    SELECT STATEMENT, GOAL = ALL_ROWS               4     1     
    FILTER                         
    FAST DUAL               2     1     
    PX COORDINATOR FORCED SERIAL                         
    PX SEND QC (RANDOM)     SYS     :TQ10000     2     2     16
    PX BLOCK ITERATOR               2     2     16
    TABLE ACCESS FULL     BRCAPDB2     MENSALIDADE     2     2     16
    ----It takes 0.1 seconds to run

  • Performance problems with EP 6 and MS IE

    Hi everybody,
for the last couple of days, we have been facing a severe performance problem with our SAP EP 6.0. When I access the system with MS Internet Explorer 6.0, it takes 5-10 minutes after login. With the Firefox browser, the performance is OK. Therefore I assume it must be a problem with the IE settings. Does anybody know a solution?
    Best regards,
       Michael

There are a few things this could be. I've seen the setting "Empty Temporary Internet Files folder when browser is closed" cause a lot of performance problems (this is in the advanced settings of your IE).
It causes your cache to be cleared out each time the browser is closed, and so a lot more data has to be downloaded each time you log in to the system.
    For more analysis I'd recommend putting a tool like HTTPWatch into your IE browser and seeing which requests are using the most time.

  • Performance problems with DFSN, ABE and SMB

    Hello,
    We have identified a problem with DFS-Namespace (DFSN), Access Based Enumeration (ABE) and SMB File Service.
    Currently we have two Windows Server 2008 R2 servers providing the domain-based DFSN in functional level Windows Server 2008 R2 with activated ABE.
    The DFSN servers have the most current hotfixes for DFSN and SMB installed, according to http://support.microsoft.com/kb/968429/en-us and http://support.microsoft.com/kb/2473205/en-us
    We have only one AD-site and don't use DFS-Replication.
    Servers have 2 Intel X5550 4 Core CPUs and 32 GB Ram.
    Network is a LAN.
    Our DFSN looks like this:
    \\contoso.com\home
        Contains 10.000 Links
        Drive mapping on clients to subfolder \\contoso.com\home\username
    \\contoso.com\group
        Contains 2500 Links
        Drive mapping on clients directly to \\contoso.com\group
    On \\contoso.com\group we serve different folders for teams, projects and other groups with different access permissions based on AD groups.
    We have to use ABE, so that users see only accessible Links (folders)
We encounter enterprise-wide performance problems, sometimes multiple times a day, for about 30 seconds when accessing our namespaces.
    After six weeks of researching and analyzing we were able to identify the exact problem.
    Administrators create a new DFS-Link in our Namespace \\contoso.com\group with correct permissions using the following command line:
    dfsutil.exe link \\contoso.com\group\project123 \\fileserver1\share\project123
    dfsutil.exe property sd grant \\contoso.com\group\project123 CONTOSO\group-project123:RX protect replace
    This is done a few times a day.
    There is no possibility to create the folder and set the permissions in one step.
The DFSN process on our DFSN servers creates the new link and the corresponding folder in C:\DFSRoots.
    At this time, we have for example 2000+ clients having an active session to the root of the namespace \\contoso.com\group.
    Active session means a Windows Explorer opened to the mapped drive or to any subfolder.
The file server process (Lanmanserver) sends a change notification (SMB protocol) to each client with an active session to \\contoso.com\group.
All clients receiving the notification then start to refresh the folder listing of \\contoso.com\group.
This was identified by a network trace on our DFSN servers and on different clients.
Due to ABE, the servers have to compute the folder listing for each request.
The DFS service on the servers then doesn't respond to any additional requests for roughly 30 seconds. CPU usage increases significantly over this period and goes back to normal afterwards; on our hardware, from about 5% to 50%.
    Users can't access all DFS-Namespaces during this time and applications using data from DFS-Namespace stop responding.
Side effect: Windows reports a slow-link detection on clients for \\contoso.com\home, which can be made offline-available for users (described here for WAN connections: http://blogs.technet.com/b/askds/archive/2011/12/14/slow-link-with-windows-7-and-dfs-namespaces.aspx)
The problem doesn't occur when creating a link in \\contoso.com\home, because users only have mappings to subfolders.
Currently, the problem also doesn't occur for \\contoso.com\app, because users usually don't use Windows Explorer to access this mapping.
Disabling ABE reduces the DFSN freeze time, but doesn't solve the problem.
The problem also occurs with Windows Server 2012 R2 as the DFSN server.
There is a registry key available for clients to suppress the response to the change notification (NoRemoteChangeNotify, see http://support.microsoft.com/kb/812669/en-us).
This might fix the problem with DFSN, but it creates other problems for users. For example, they would have to press F5 to refresh every remote directory after a change.
Is there a possibility to disable the SMB change notification on the server side?
    TIA and regards,
    Ralf Gaudes

    Hi,
    Thanks for posting in Microsoft Technet Forums.
    I am trying to involve someone familiar with this topic to further look at this issue. There might be some time delay. Appreciate your patience.
    Thank you for your understanding and support.
    Regards.

  • Performance problems with XMLTABLE and XMLQUERY involving relational data

    Hello-
Is anyone out there using XMLTABLE or XMLQUERY with more than a toy set of data? I am running into serious performance problems trying to do basic things such as:
* Combine records in 10 relational tables into a single table of XMLTYPE records using XMLTABLE. This hangs indefinitely for anything more than 800 records. Oracle has confirmed that this is a problem and is working on a fix.
* Combine a single XMLTYPE record with several relational code tables into a single XMLTYPE record using XMLQUERY and ora:view() to insert code descriptions after each code. Performance is 10 seconds for 10 records (terrible) when passing a batch of records, or 160 seconds for one record (unacceptable!). How can it take 10 times longer to process 1/10th the number of records? Ironically, the query plan says it will do a full table scan of records for the batch, but an index access for the one record passed to the XMLQUERY.
I am rapidly losing faith in XML DB, and desperately need some hints on how to work around these performance problems, or at least some assurance that others have been able to get this thing to perform.

    <Note>Long post, sorry.</Note>
    First, thanks for the responses above. I'm impressed with the quality of thought put into them. (Do the forum rules allow me to offer rewards? :) One suggestion in particular made a big performance improvement, and I’m encouraged to hear of good performance in pure XML situations. Unfortunately, I think there is a real performance challenge in two use cases that are pertinent to the XML+relational subject of this post and probably increasingly common as XML DB usage increases:
    •     Converting legacy tabular data into XML records; and
    •     Performing code table lookups for coded values in XML records.
    There are three things I want to accomplish with this post:
    •     Clarify what we are trying to accomplish, which might expose completely different approaches than I have tried
    •     Let you know what I tried so far and the rationale for my approach to help expose flaws in my thinking and share what I have learned
    •     Highlight remaining performance issues in hopes that we can solve them
    What we are trying to accomplish:
    •     Receive a monthly feed of 10,000 XML records (batched together in text files), each containing information about an employee, including elements that repeat for every year of service. We may need to process an annual feed of 1,000,000 XML records in the future.
    •     Receive a one-time feed of 500,000 employee records stored in about 10 relational tables, with a maximum join depth of 2 or 3. This is inherently a relational-to-XML process. One record/second is minimally acceptable, but 10 records/sec would be better.
    •     Consolidate a few records (from different providers) for each employee into a single record. Given the data volume, we need to achieve a minimum rate of 10 records per second. This may be an XML-only process, or XML+relational if code lookups are done during consolidation.
    •     Allow the records to be viewed and edited, with codes resolved into user-friendly descriptions. Since a user is sitting there, code lookups done when a record is viewed (vs. during consolidation) should not take more than 3 seconds total. We have about 20 code tables averaging a few hundred rows each, though one has 450,000 rows.
    As requested earlier, I have included code at the end of this post for example tables and queries that accurately (but simply) replicate our real system.
What we did and why:
    •     Stored the source XML records as CLOBS: We did this to preserve the records exactly as they were certified and sent from providers. In addition, we always access the entire XML record as a whole (e.g., when viewing a record or consolidating employee records), so this storage model seemed like a good fit. We can copy them into another format if necessary.
    •     Stored the consolidated XML employee records as “binary XML”. We did this because we almost always access a single, entire record as a whole (for view/edit), but might want to create some summary statistics at some point. Binary XML seemed the best fit.
    •     Used ora:view() for both tabular source records and lookup tables. We are not aware of any alternatives at this time. If it made sense, most code tables could be pre-converted into XML documents, but this seemed risky from a performance standpoint because the lookups use both code and date range constraints (the meaning of codes changes over time).
    •     Stored records as XMLTYPE columns in a table with other key columns, plus an XMLTYPE metadata column. We thought this would facilitate pulling a single record (or a few records for a given employee) quickly. We knew this might be unnecessary given XML indexes and virtual columns, but we were not experienced with those and wanted the comfort of traditional keys. We did not use XMLTYPE tables or the XML Repository for documents.
    •     Used XMLTABLE to consolidate XML records by looping over each distinct employee ID in the source batch. We also tried XMLQUERY and it seems to perform about the same. We can achieve 10 to 20 records/second if we do not do any code lookups during consolidation, just meeting our performance requirement, but still much slower than expected.
    •     Used PL/SQL with XMLFOREST to convert tabular source records to XML by looping over distinct employee IDs (a rough sketch follows this list). We tried this outside PL/SQL, both with XMLFOREST and with XMLTABLE+ora:view(), but it hangs in both cases for more than 800 records (a known/open issue). We were able to get it to work by using an explicit cursor to loop over distinct employee IDs rather than processing all records at once within the query. The performance is one record/second, which is minimally acceptable but interferes with other database activity.
    •     Used XMLQUERY plus ora:view() plus XPATH constraints to perform code lookups. When passing a single employee record, the response time ranges from 1 sec to 160 sec depending on the length of the record (i.e., number of years of service). We achieved a 5-fold speedup using an XMLINDEX (thank you Marco!!). The result may be minimally acceptable, but I’m baffled why the index would be needed when processing a single XML record. Other things we tried: joining code tables in the FOR...WHERE clauses, joining code tables using LET with XPATH constraints and LET with WHERE clause constraints, and looking up codes individually via JDBC from the application code at presentation time. All of those approaches were slower. Note: the difference I mentioned above in equality/inequality constraint performance was due to data record variations, not query plan variations.
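    Here is roughly what the explicit-cursor conversion loop mentioned above looks like. This is a simplified sketch only: legacy_employees and its columns are placeholder names, not our real schema, and the repeating per-year elements would be built with XMLAGG in the real version.
    DECLARE
      v_doc xmltype;
    BEGIN
      for r in (select distinct emp_id from legacy_employees) loop
        -- assumes one header row per employee in the placeholder table
        select xmlelement("Employee",
                 xmlforest(e.emp_id as "Id",
                           e.last_name as "LastName",
                           e.hire_date as "HireDate"))
          into v_doc
          from legacy_employees e
         where e.emp_id = r.emp_id;
        insert into records(ssn, xmlrec) values (to_char(r.emp_id), v_doc);
      end loop;
      commit;
    END;
    /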
    What issues remain?
    We have a minimally acceptable solution from a performance standpoint with one very awkward PL/SQL workaround. The performance of a mixed XML+relational data query is still marginal IMHO, until we properly utilize available optimizations, fix known problems, and perhaps get some new query optimizations. On the last point, I think the query plan for tabular lookups of codes in XML records is falling short right now. I’m reminded of data warehousing in the days before hash joins and star join optimization. I would be happy to be wrong, and just as happy for viable workarounds if I am right!
    Here are the details on our code lookup challenge. Additional suggestions would be greatly appreciated. I’ll try to post more detail on the legacy table conversion challenge later.
    -- The main record table:
    create table RECORDS (
      SSN varchar2(20),
      XMLREC sys.xmltype
    )
    xmltype column XMLREC store as binary xml;
    create index records_ssn on records(ssn);
    -- A dozen code tables represented by one like this:
    create table CODES (
      CODE varchar2(4),
      DESCRIPTION varchar2(500)
    );
    create index codes_code on codes(code);
    -- Some XML records with coded values (the real records are much more complex of course):
    -- I think this took about a minute or two
    DECLARE
      xmlrec xmltype;
    BEGIN
      xmlrec := xmltype('<?xml version="1.0"?>
    <Root>
      <Id>123456789</Id>
      <Element>
        <Subelement1><Code>11</Code></Subelement1>
        <Subelement2><Code>21</Code></Subelement2>
        <Subelement3><Code>31</Code></Subelement3>
      </Element>
      <Element>
        <Subelement1><Code>11</Code></Subelement1>
        <Subelement2><Code>21</Code></Subelement2>
        <Subelement3><Code>31</Code></Subelement3>
      </Element>
      <Element>
        <Subelement1><Code>11</Code></Subelement1>
        <Subelement2><Code>21</Code></Subelement2>
        <Subelement3><Code>31</Code></Subelement3>
      </Element>
    </Root>');
      for i in 1..100000 loop
        insert into records(ssn, xmlrec) values (to_char(i), xmlrec);
      end loop;
      commit;
    END;
    /
    -- Some code data like this (ignoring date ranges on codes):
    DECLARE
      v_description varchar2(100);
    BEGIN
      v_description := 'This is the code description ';
      for i in 1..3000 loop
        insert into codes(code, description) values (to_char(i), v_description);
      end loop;
      commit;
    END;
    /
    -- Retrieve one record while performing code lookups. Takes about 5-6 seconds...pretty slow.
    -- Each additional lookup (times 3 repeating elements in the data) adds about 1 second.
    -- A typical real record has 5 Elements and 20 Subelements, meaning more than 20 seconds to display the record
    -- Note we are accessing a single XML record based on SSN
    -- Note also we are reusing the one test code table multiple times for convenience of this test
    select xmlquery('
      for $r in Root
      return
        <Root>
          <Id>123456789</Id>
          {for $e in $r/Element
           return
             <Element>
               <Subelement1>
                 {$e/Subelement1/Code}
                 <Description>
                   {ora:view("disaac","codes")/ROW[CODE=$e/Subelement1/Code]/DESCRIPTION/text()}
                 </Description>
               </Subelement1>
               <Subelement2>
                 {$e/Subelement2/Code}
                 <Description>
                   {ora:view("disaac","codes")/ROW[CODE=$e/Subelement2/Code]/DESCRIPTION/text()}
                 </Description>
               </Subelement2>
               <Subelement3>
                 {$e/Subelement3/Code}
                 <Description>
                   {ora:view("disaac","codes")/ROW[CODE=$e/Subelement3/Code]/DESCRIPTION/text()}
                 </Description>
               </Subelement3>
             </Element>}
        </Root>
    ' passing xmlrec returning content)
    from records
    where ssn = '10000';
    The plan shows the nested loop access that slows things down.
    By contrast, a functionally similar SQL query on relational data will use a hash join and perform 10x to 100x faster, even for a single record. There seems to be no way for the optimizer to see the regularity in the XML structure and perform a corresponding optimization when joining the code tables. Not sure if registering a schema would help. Using structured storage probably would. But should that be necessary given we’re working with a single record?
    Operation Object
    |SELECT STATEMENT ()
    | SORT (AGGREGATE)
    | NESTED LOOPS (SEMI)
    | TABLE ACCESS (FULL) CODES
    | XPATH EVALUATION ()
    | SORT (AGGREGATE)
    | NESTED LOOPS (SEMI)
    | TABLE ACCESS (FULL) CODES
    | XPATH EVALUATION ()
    | SORT (AGGREGATE)
    | NESTED LOOPS (SEMI)
    | TABLE ACCESS (FULL) CODES
    | XPATH EVALUATION ()
    | SORT (AGGREGATE)
    | XPATH EVALUATION ()
    | SORT (AGGREGATE)
    | XPATH EVALUATION ()
    | TABLE ACCESS (BY INDEX ROWID) RECORDS
    | INDEX (RANGE SCAN) RECORDS_SSN
    With an xmlindex, the same query above runs in about 1 second, so is about 5x faster (0.2 sec/lookup), which is almost good enough. Is this the answer? Or is there a better way? I’m not sure why the optimizer wants to scan the code tables and index into the (one) XML record, rather than the other way around, but maybe that makes sense if the optimizer wants to use the same general plan as when the WHERE clause constraint is relaxed to multiple records.
    -- Add an xmlindex. Takes about 2.5 minutes
    create index records_record_xml ON records(xmlrec)
    indextype IS xdb.xmlindex;
    Operation Object
    |SELECT STATEMENT ()
    | SORT (GROUP BY)
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (AGGREGATE)
    | FILTER ()
    | TABLE ACCESS (FULL) CODES
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (GROUP BY)
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (AGGREGATE)
    | FILTER ()
    | TABLE ACCESS (FULL) CODES
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (GROUP BY)
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (AGGREGATE)
    | FILTER ()
    | TABLE ACCESS (FULL) CODES
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (AGGREGATE)
    | FILTER ()
    | NESTED LOOPS ()
    | FAST DUAL ()
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | SORT (AGGREGATE)
    | TABLE ACCESS (BY INDEX ROWID) SYS113473_RECORDS_R_PATH_TABLE
    | INDEX (RANGE SCAN) SYS113473_RECORDS_R_PATHID_IX
    | TABLE ACCESS (BY INDEX ROWID) RECORDS
    | INDEX (RANGE SCAN) RECORDS_SSN
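    One refinement I have not tried yet: since the queries only ever touch the Code paths, the XMLIndex could presumably be restricted with path subsetting, which should shrink the path table and the 2.5-minute build time. A sketch using the 11g unstructured XMLIndex PARAMETERS syntax (untested on our data):
    drop index records_record_xml;
    create index records_record_xml on records(xmlrec)
      indextype is xdb.xmlindex
      parameters ('PATHS (INCLUDE (/Root/Element/Subelement1/Code
                                   /Root/Element/Subelement2/Code
                                   /Root/Element/Subelement3/Code))');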
    Am I on the right path, or am I totally using the wrong approach? I thought about using XSLT but was unsure how to reference the code tables.
    I’ve done the best I can constraining the main record to a single row passed to the XMLQUERY. Given Mark’s post (thanks!) should I be joining and constraining the code tables in the SQL WHERE clause too? That’s going to make the query much more complicated, but right now we’re more concerned about performance than complexity.
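    For the record, here is the kind of rewrite I have in mind: shred the single record with XMLTABLE in the FROM clause and join the code tables relationally, so the optimizer is free to choose hash joins. An untested sketch against the test tables above (the XML would still need to be reassembled with XMLELEMENT/XMLAGG afterwards):
    select x.code1, c1.description as description1,
           x.code2, c2.description as description2,
           x.code3, c3.description as description3
      from records r,
           xmltable('/Root/Element' passing r.xmlrec
                    columns code1 varchar2(4) path 'Subelement1/Code',
                            code2 varchar2(4) path 'Subelement2/Code',
                            code3 varchar2(4) path 'Subelement3/Code') x,
           codes c1, codes c2, codes c3
     where r.ssn = '10000'
       and c1.code = x.code1
       and c2.code = x.code2
       and c3.code = x.code3;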

  • OBIEE 11g performance problem

    Hi,
    I am facing a performance problem in OBIEE 11g. When I run the query taken from nqquery.log directly against the database, it returns results within a few seconds. But in OBIEE Answers, the same query runs forever and never shows any data.
    The query is attached below.
    Please help me solve this.
    Thanks
    Titas
    [2012-10-16T18:07:34.000+00:00] [OracleBIServerComponent] [TRACE:2] [USER-23] [] [ecid: 3a39339b45a46ab4:-70b1919f:13a1f282668:-8000-00000000000769b2] [tid: 44475940] [requestid: 26e1001e] [sessionid: 26e10000] [username: weblogic] -------------------- General Query Info: [[
    Repository: Star, Subject Area: BM_BG Pascua Lama, Presentation: BG PL Project Analysis
    [2012-10-16T18:07:34.000+00:00] [OracleBIServerComponent] [TRACE:2] [USER-18] [] [ecid: 3a39339b45a46ab4:-70b1919f:13a1f282668:-8000-00000000000769b2] [tid: 44475940] [requestid: 26e1001e] [sessionid: 26e10000] [username: weblogic] -------------------- Sending query to database named XXBG Pascua Lama (id: <<26911>>), connection pool named Connection Pool, logical request hash e3feca59, physical request hash 5ab00db6: [[
    WITH
    SAWITH0 AS (select sum(T6051.COST_AMT_PROJ_RATE) as c1,
    sum(T6051.COST_AMOUNT) as c2,
    T6051.AFE_NUMBER as c3,
    T6051.BUDGET_OWNER as c4,
    T6051.COMMENTS as c5,
    T6051.COMMODITY as c6,
    T6051.COST_PERIOD as c7,
    T6051.COST_SOURCE as c8,
    T6051.COST_TYPE as c9,
    T6051.DATA_SEL as c10,
    T6051.FACILITY as c11,
    T6051.HISTORICAL as c12,
    T6051.OPERATING_UNIT as c13,
    T5633.project_number as c14,
    T5637.task_number as c15
    from
    (SELECT project_id proj_id
    ,segment1 project_number
    ,org_id
    FROM pa.pa_projects_all
    WHERE org_id IN (825, 865, 962, 2161)) T5633,
    (SELECT project_id proj_id
    ,task_id
    ,task_number
    ,task_name
    FROM pa.pa_tasks) T5637,
    (SELECT xxbg_pl_proj_analysis_cost_v.AFE_NUMBER,
    xxbg_pl_proj_analysis_cost_v.BUDGET_OWNER,
    xxbg_pl_proj_analysis_cost_v.COMMENTS,
    xxbg_pl_proj_analysis_cost_v.COMMODITY,
    xxbg_pl_proj_analysis_cost_v.COST_PERIOD,
    xxbg_pl_proj_analysis_cost_v.COST_SOURCE,
    xxbg_pl_proj_analysis_cost_v.COST_TYPE,
    xxbg_pl_proj_analysis_cost_v.FACILITY,
    xxbg_pl_proj_analysis_cost_v.HISTORICAL,
    xxbg_pl_proj_analysis_cost_v.PO_NUMBER_COST_CONTROL,
    xxbg_pl_proj_analysis_cost_v.PREVIOUS_PROJECT,
    xxbg_pl_proj_analysis_cost_v.PREV_AFE_NUMBER,
    xxbg_pl_proj_analysis_cost_v.PREV_COST_CONTROL_ACC_CODE,
    xxbg_pl_proj_analysis_cost_v.PREV_COST_TYPE,
    xxbg_pl_proj_analysis_cost_v.PROJECT_NUMBER,
    xxbg_pl_proj_analysis_cost_v.SUPPLIER_NAME,
    xxbg_pl_proj_analysis_cost_v.TASK_DESCRIPTION,
    xxbg_pl_proj_analysis_cost_v.TASK_NUMBER,
    xxbg_pl_proj_analysis_cost_v.TRANSACTION_NUMBER,
    xxbg_pl_proj_analysis_cost_v.WORK_PACKAGE,
    xxbg_pl_proj_analysis_cost_v.WP_OWNER,
    xxbg_pl_proj_analysis_cost_v.OPERATING_UNIT,
    xxbg_pl_proj_analysis_cost_v.DATA_SEL,
    pa_periods_all.PERIOD_NAME,
    xxbg_pl_proj_analysis_cost_v.ORG_ID,
    xxbg_pl_proj_analysis_cost_v.COST_AMT_PROJ_RATE COST_AMT_PROJ_RATE,
    xxbg_pl_proj_analysis_cost_v.COST_AMOUNT COST_AMOUNT,
    xxbg_pl_proj_analysis_cost_v.project_id,
    xxbg_pl_proj_analysis_cost_v.task_id
    FROM (select xpac.*,
    decode(xpac.historical, 'Y', 'Historical', 'N', 'Current') data_sel
    from apps.xxbg_pl_proj_analysis_cost_v xpac
    union
    select xpac.*, 'All' data_sel
    from apps.xxbg_pl_proj_analysis_cost_v xpac) xxbg_pl_proj_analysis_cost_v,
    (select period_name, org_id from apps.pa_periods_all) pa_periods_all
    WHERE ((xxbg_pl_proj_analysis_cost_v.ORG_ID = pa_periods_all.ORG_ID))
    AND (xxbg_pl_proj_analysis_cost_v.ORG_ID IN (825,865,962,2161))
    AND (APPS.XXBG_PL_PA_COMMITMENT_PKG.GET_LAST_DAY(xxbg_pl_proj_analysis_cost_v.COST_PERIOD) <=
    APPS.XXBG_PL_PA_COMMITMENT_PKG.GET_LAST_DAY(pa_periods_all.PERIOD_NAME))) T6051
    where ( T5633.proj_id = T5637.proj_id and T5633.project_number = 'SUDPALAPAS11' and T5637.proj_id = T6051.PROJECT_ID and T5637.task_id = T6051.TASK_ID and T5637.task_number = '2100.2000.01.BC0100' and T6051.DATA_SEL = 'All' and T6051.OPERATING_UNIT = 'Compañía Minera Nevada SpA' and T6051.PERIOD_NAME = 'JUL-12' )
    group by T5633.project_number, T5637.task_number, T6051.AFE_NUMBER, T6051.BUDGET_OWNER, T6051.COMMENTS, T6051.COMMODITY, T6051.COST_PERIOD, T6051.COST_SOURCE, T6051.COST_TYPE, T6051.DATA_SEL, T6051.FACILITY, T6051.HISTORICAL, T6051.OPERATING_UNIT)
    select D1.c1 as c1, D1.c2 as c2, D1.c3 as c3, D1.c4 as c4, D1.c5 as c5, D1.c6 as c6, D1.c7 as c7, D1.c8 as c8, D1.c9 as c9, D1.c10 as c10, D1.c11 as c11, D1.c12 as c12, D1.c13 as c13, D1.c14 as c14, D1.c15 as c15, D1.c16 as c16 from ( select distinct 0 as c1,
    D1.c3 as c2,
    D1.c4 as c3,
    D1.c5 as c4,
    D1.c6 as c5,
    D1.c7 as c6,
    D1.c8 as c7,
    D1.c9 as c8,
    D1.c10 as c9,
    D1.c11 as c10,
    D1.c12 as c11,
    D1.c13 as c12,
    D1.c14 as c13,
    D1.c15 as c14,
    D1.c2 as c15,
    D1.c1 as c16
    from
    SAWITH0 D1
    order by c13, c14, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12 ) D1 where rownum <= 65001

    Hi Titas,
    with problems like this, the cause (at least for me) typically turns out to be something simple and embarrassing, like:
    - I am connected to another database,
    - The database is right but I have made some manual adjustments without committing them,
    - I have got the wrong query from the query log,
    - I have got the right query but my request is based on multiple queries.
    Do other OBIEE reports work fine?
    Have you tried removing columns one by one to see whether it makes a difference?
    -JR
