Non-heap memory area leak in JVM 1.5.0_06

Please excuse my poor English; I would like to ask a question about this topic.
A problem occurs when testing as follows.
The problem is a memory leak in the non-heap area.
%java CconnectTest2 129.24.34.68 130.104.10.50 6101 5000 185000 100
(%java -server CconnectTest2 bind_ip connect_ip connect_port wait_cnt connect_TIMEOUT thread_cnt)
CconnectTest2.java

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.channels.IllegalBlockingModeException;
import java.util.Calendar;

public class CconnectTest2 {

    /**
     * @param args bind_ip connect_ip connect_port wait_cnt connect_TIMEOUT thread_cnt
     */
    public static void main(String[] args) {
        // Guard against missing arguments before the individual checks below.
        if (args.length < 6) {
            System.err.println("usage: bind_ip connect_ip connect_port wait_cnt connect_TIMEOUT thread_cnt");
            return;
        }
        if (args[0] == null || args[0].equals("")) {
            System.err.println("bind_ip*");
            return;
        }
        if (args[1] == null || args[1].equals("")) {
            System.err.println("connect_ip*");
            return;
        }
        if (args[2] == null || args[2].equals("")) {
            System.err.println("connect_port*");
            return;
        }
        if (args[3] == null || args[3].equals("")) {
            System.err.println("wait_cnt*");
            return;
        }
        if (args[4] == null || args[4].equals("")) {
            System.err.println("connect_TIMEOUT*");
            return;
        }
        if (args[5] == null || args[5].equals("")) {
            System.err.println("thread_cnt*");
            return;
        }

        String bind_ip      = args[0];
        String connect_ip   = args[1];
        int connect_port    = Integer.parseInt(args[2]);
        int wait_cnt        = Integer.parseInt(args[3]);
        int connect_TIMEOUT = Integer.parseInt(args[4]);
        int thread_cnt      = Integer.parseInt(args[5]);

        System.out.println("bind_ip=" + bind_ip);
        System.out.println("connect_ip=" + connect_ip);
        System.out.println("connect_port=" + connect_port);
        System.out.println("wait_cnt=" + wait_cnt);
        System.out.println("connect_TIMEOUT=" + connect_TIMEOUT);
        System.out.println("thread_cnt=" + thread_cnt);

        // Start thread_cnt connector threads, one per second.
        int i = 0;
        while (true) {
            i++;
            if (i <= thread_cnt) {
                ConnectExe a = new ConnectExe(connect_ip, bind_ip, connect_port, connect_TIMEOUT, wait_cnt);
                a.start();
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ex) {
                }
                System.out.println("thread_start[" + i + "]");
            } else {
                System.out.println("thread_start OK " + thread_cnt);
                break;
            }
        }

        // Main thread: force a GC every 20 seconds and report free heap and active threads.
        while (true) {
            System.gc();
            try {
                Thread.sleep(20000);
            } catch (InterruptedException ex) {
            }
            int hour = Calendar.getInstance().get(Calendar.HOUR_OF_DAY);
            int min  = Calendar.getInstance().get(Calendar.MINUTE);
            int sec  = Calendar.getInstance().get(Calendar.SECOND);
            Runtime run = Runtime.getRuntime();
            long freesize = (run.maxMemory() - run.totalMemory()) + run.freeMemory();
            if (freesize > 0) {
                freesize = freesize / 1024;
                System.out.println("" + hour + ":" + min + ":" + sec
                        + " HeapFreeSize:" + freesize + " K"
                        + " activeThread:" + Thread.activeCount());
            }
        }
    }
    public static class ConnectExe extends Thread {
        String serverAddress = null;
        String bindip = null;
        int port = 6100;
        int timeout = 0;
        int wait_cnt = 0;

        public ConnectExe(String ne, String bind, int connect_port, int timeout, int wait_cnt) {
            this.serverAddress = ne;
            this.bindip        = bind;
            this.port          = connect_port;
            this.timeout       = timeout;
            this.wait_cnt      = wait_cnt;
        }

        public void run() {
            while (true) {
                SocketAddress socketAddress = null;
                Socket socket = null;
                try {
                    socket = new Socket();
                    socket.setKeepAlive(true);
                    byte[] ip = InetAddress.getByName(this.serverAddress).getAddress();
                    InetAddress addr = InetAddress.getByAddress(this.serverAddress, ip);
                    if (bindip == null) {
                        socketAddress = new InetSocketAddress(addr, this.port);
                    } else {
                        socketAddress = new InetSocketAddress(addr, this.port);
                        // getPort() is 0 for an unconnected socket, so this binds to an ephemeral local port.
                        SocketAddress bindAddress = new InetSocketAddress(this.bindip, socket.getPort());
                        socket.bind(bindAddress);
                    }
                    System.out.println("connect:" + this.serverAddress);
                    socket.connect(socketAddress, this.timeout);
                    socket.close();
                    System.out.println("close :" + this.serverAddress);
                    socket = null;
                } catch (UnknownHostException e) {
                    System.out.println("This socket cannot be connected with the server (UnknownHostException) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                } catch (SocketTimeoutException e) {
                    System.out.println("This socket cannot be connected with the server (SocketTimeoutException) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                } catch (IllegalBlockingModeException e) {
                    System.out.println("This socket cannot be connected with the server (IllegalBlockingModeException) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                } catch (IllegalArgumentException e) {
                    System.out.println("This socket cannot be connected with the server (IllegalArgumentException) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                } catch (IOException e) {
                    System.out.println("This socket cannot be connected with the server (IOException) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                } catch (Exception e) {
                    System.out.println("This socket cannot be connected with the server (Exception) " + e.getMessage());
                    if (socket != null) {
                        try {
                            socket.close();
                        } catch (IOException e1) {
                            System.out.println(e1.getMessage());
                        }
                        socket = null;
                        socketAddress = null;
                    }
                }
                // Wait wait_cnt milliseconds before the next connect attempt.
                try {
                    Thread.sleep(this.wait_cnt);
                } catch (InterruptedException e) {
                }
            }
        }
    }
}
Memory leaks gradually in the non-heap area when the test runs for a long time.
The problem is that the leak occurs regardless of connection timeout errors.
Is this a JVM bug?
Or is it a problem in my code?
Environment:
JVM: Sun JVM 1.5.0_06
OS: Red Hat Enterprise Linux ES 3 Update 6 (kernel 2.4.21-37.ELsmp)
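
For reference, a minimal sketch of the same connect loop with the socket closed in a finally block (a hypothetical rewrite, not the test code above); if the non-heap growth stops with this pattern, the cleanup paths are a more likely culprit than the JVM:

public void run() {
    while (true) {
        Socket socket = new Socket();
        try {
            socket.setKeepAlive(true);
            SocketAddress remote = new InetSocketAddress(serverAddress, port);
            socket.connect(remote, timeout);
        } catch (IOException e) {
            System.out.println("connect failed: " + e.getMessage());
        } finally {
            try {
                socket.close();   // always release the native socket, even after a timeout
            } catch (IOException e) {
                // ignore close failures
            }
        }
        try {
            Thread.sleep(wait_cnt);
        } catch (InterruptedException e) {
        }
    }
}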

I was searching Google for "java Calendar memory leak" and ran across this post. I haven't examined your code closely, but I do see that you make a lot of calls to Calendar.getInstance(), and I am having the same kind of issue you describe. I took the Calendar class out (no more getInstance() calls) and the memory still increases, but at a MUCH slower rate, and the CPU time is down drastically. I don't know if this is the cause, because I haven't seen it documented, but it would be great if someone could confirm it. Maybe try removing the Calendar.getInstance() calls and find another way to get the time, for example System.currentTimeMillis() and working from there to get the hours (that's what I did).
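Something along these lines is what I mean (a rough sketch; it yields UTC unless you add the local time-zone offset):

// Derive hour/minute/second from the epoch time without creating Calendar instances.
long now = System.currentTimeMillis();
long secondsOfDay = (now / 1000L) % 86400L;     // seconds since midnight, UTC
int hour = (int) (secondsOfDay / 3600);
int min  = (int) ((secondsOfDay % 3600) / 60);
int sec  = (int) (secondsOfDay % 60);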
Hope this helps,
Elijah

Similar Messages

  • Memory leak in JVM 1.3.1

    Hi,
    I'm using the following set up
    OS: Redhat linux 7.1
    Kernel: 2.4.2-1
    glibc-2.1.x for i386
    other set up: ulimit -s 2048, JDK 1.3.1_01
    app: Jboss app server
    I'm running my application in Jboss 2.2.1 which is a I/O intensive. This is a distributed system using visigenic corba 4.5.
    The problem:
    If I run my application, the application (heap) memory remains stable over time, but the JVM process memory grows linearly. I measured the app memory using Runtime.getRuntime().totalMemory()
    and the JVM process memory using top.
    Is the JVM leaking? I'm not getting an OutOfMemoryError anywhere, but my system freezes when it reaches the point where it cannot allocate more memory to the java process.
    Reg
    Ved
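
    For reference, the two numbers being compared here can be logged side by side; the heap figures come from Runtime, while the resident size of the process still has to be read from top or ps (a minimal sketch):

    // Committed/used/free heap; the gap between "committed" and what top reports
    // is native (non-heap) memory used by the JVM process itself.
    Runtime rt = Runtime.getRuntime();
    long total = rt.totalMemory();
    long free  = rt.freeMemory();
    System.out.println("heap committed=" + (total / 1024) + "K"
            + " used=" + ((total - free) / 1024) + "K"
            + " free=" + (free / 1024) + "K");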

    Freezes? You mean that it stops running at 8 at night and when you come in at 8 the next morning it is still doing nothing? Or that it stops for several minutes?
    Memory problems are not usually the cause of this. Some possibilities:
    - Blocked or deadlocked threads (a quick check for the deadlock case is sketched below).
    - Blocked I/O.
    - Endless spins (code runs but does nothing).
    - JNI code.
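
    A quick way to check the deadlock case (a sketch; it assumes a JVM that ships java.lang.management, i.e. 1.5 or later, whereas on 1.3.1 a thread dump via kill -3 gives the same information):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    public class DeadlockCheck {
        public static void main(String[] args) {
            // findMonitorDeadlockedThreads() returns null when no monitor deadlock exists.
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            long[] ids = mx.findMonitorDeadlockedThreads();
            System.out.println(ids == null ? "no monitor deadlock" : ids.length + " deadlocked threads");
        }
    }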

  • Does bdb cache use JVM heap memory?

    I'm using BDB Java API. Do I need to set the heap size to be bigger than the db cache size?

    Hi Mimi,
    BDB cache size and JVM heap size are used for two different things. Cache size is used for storing the db data in modifying them in memory. If the cache size is not optimum then there would be too much of I/O activity and the performance would be affected.
    JVM heap size is used by the JVM to store the objects that the application creates. If the heap size is too small and there are too many objects get created by the application then the GC thread would be very active and will affect the performance of the application.
    Whether the heap size would be bigger than the cache size or not will depend upon how the application works.
    -Debsubhra Roy
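
    As a rough illustration (assuming the Berkeley DB Java Edition API, where the cache is allocated inside the JVM heap; the sizes are placeholders), the cache is sized through EnvironmentConfig while the heap that must contain it is sized through -Xmx:

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    // Started with e.g. "java -Xmx512m MyApp" so the heap comfortably holds the 64 MB cache
    // plus the application's own objects.
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setCacheSize(64L * 1024 * 1024);   // 64 MB JE cache, held in the JVM heap
    Environment env = new Environment(new File("/path/to/envhome"), envConfig);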

  • Can off JVM heap Memory used in the Near-Cache front-tier

    I had tried to config a near-Cache used nio-manager(off JVM heap) in the Front-tier.
    <near-scheme>
          <scheme-name>CohApp-near</scheme-name>
          <front-scheme>
            <external-scheme>
            </external-scheme>
          </front-scheme>
          <back-scheme>
            <distributed-scheme>
              <scheme-ref>CohApp-distributed</scheme-ref>
            </distributed-scheme>
          </back-scheme>
          <invalidation-strategy>auto</invalidation-strategy>
          <autostart>true</autostart>
        </near-scheme>
    when start 'com.tangosol.net.DefaultCacheServer' for this config, error as:
    Oracle Coherence Version 3.7.1.0 Build 27797
    Enterprise Edition: Development mode
    Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
    2014-03-30 16:34:17.518/1.201 Oracle Coherence EE 3.7.1.0 <Error> (thread=main,
    member=n/a): Error org.xml.sax.SAXParseException: cvc-complex-type.2.4.a: Invali
    d content was found starting with element 'external-scheme'. One of '{"http://xm
    lns.oracle.com/coherence/coherence-cache-config":local-scheme, "http://xmlns.ora
    cle.com/coherence/coherence-cache-config":class-scheme}' is expected. - line 92
    Exception in thread "main" (Wrapped: Failed to load the factory) (Wrapped: Missi
    ng or inaccessible constructor "com.tangosol.net.DefaultConfigurableCacheFactory
    (String)"
    <configurable-cache-factory-config>
      <class-name>com.tangosol.net.DefaultConfigurableCacheFactory</class-name>
      <init-params>
        <init-param>
          <param-type>java.lang.String</param-type>
          <param-value>coherence-cache-config.xml</param-value>
        </init-param>
      </init-params>
    </configurable-cache-factory-config>) java.lang.reflect.InvocationTargetExceptio
    n
            at com.tangosol.util.Base.ensureRuntimeException(Base.java:288)
            at com.tangosol.net.ScopedCacheFactoryBuilder.getDefaultFactory(ScopedCa
    cheFactoryBuilder.java:311)
            at com.tangosol.net.DefaultCacheFactoryBuilder.getSingletonFactory(Defau
    ltCacheFactoryBuilder.java:48)
            at com.tangosol.net.DefaultCacheFactoryBuilder.getFactory(DefaultCacheFa
    ctoryBuilder.java:121)
            at com.tangosol.net.ScopedCacheFactoryBuilder.getConfigurableCacheFactor
    y(ScopedCacheFactoryBuilder.java:112)
            at com.tangosol.net.CacheFactory.getConfigurableCacheFactory(CacheFactor
    y.java:126)
            at com.tangosol.net.DefaultCacheServer.getDefaultConfigurableCacheFactor
    y(DefaultCacheServer.java:364)
            at com.tangosol.net.DefaultCacheServer.main(DefaultCacheServer.java:197)
    Caused by: (Wrapped: Missing or inaccessible constructor "com.tangosol.net.Defau
    ltConfigurableCacheFactory(String)"
    <configurable-cache-factory-config>
      <class-name>com.tangosol.net.DefaultConfigurableCacheFactory</class-name>
      <init-params>
        <init-param>
          <param-type>java.lang.String</param-type>
          <param-value>coherence-cache-config.xml</param-value>
        </init-param>
      </init-params>
    </configurable-cache-factory-config>) java.lang.reflect.InvocationTargetExceptio
    n
            at com.tangosol.util.Base.ensureRuntimeException(Base.java:288)
            at com.tangosol.run.xml.XmlHelper.createInstance(XmlHelper.java:2652)
            at com.tangosol.run.xml.XmlHelper.createInstance(XmlHelper.java:2536)
            at com.tangosol.net.ScopedCacheFactoryBuilder.getDefaultFactory(ScopedCa
    cheFactoryBuilder.java:273)
            ... 6 more
    Caused by: java.lang.reflect.InvocationTargetException
            at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
            at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstruct
    orAccessorImpl.java:39)
            at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingC
    onstructorAccessorImpl.java:27)
            at java.lang.reflect.Constructor.newInstance(Constructor.java:513)
            at com.tangosol.util.ClassHelper.newInstance(ClassHelper.java:694)
            at com.tangosol.run.xml.XmlHelper.createInstance(XmlHelper.java:2611)
            ... 8 more
    Caused by: (Wrapped: Failed to load cache configuration: coherence-cache-config.
    xml) (Wrapped) java.io.IOException: Exception occurred during schema validation:
    cvc-complex-type.2.4.a: Invalid content was found starting with element 'externa
    l-scheme'. One of '{"http://xmlns.oracle.com/coherence/coherence-cache-config":l
    ocal-scheme, "http://xmlns.oracle.com/coherence/coherence-cache-config":class-sc
    heme}' is expected.
            at com.tangosol.util.Base.ensureRuntimeException(Base.java:288)
            at com.tangosol.run.xml.XmlHelper.loadResourceInternal(XmlHelper.java:34
    1)
            at com.tangosol.run.xml.XmlHelper.loadFileOrResource(XmlHelper.java:283)
            at com.tangosol.net.DefaultConfigurableCacheFactory.loadConfig(DefaultCo
    nfigurableCacheFactory.java:439)
            at com.tangosol.net.DefaultConfigurableCacheFactory.loadConfig(DefaultCo
    nfigurableCacheFactory.java:425)
            at com.tangosol.net.DefaultConfigurableCacheFactory.<init>(DefaultConfig
    urableCacheFactory.java:155)
            ... 14 more
    Caused by: (Wrapped) java.io.IOException: Exception occurred during schema valid
    ation:
    cvc-complex-type.2.4.a: Invalid content was found starting with element 'externa
    l-scheme'. One of '{"http://xmlns.oracle.com/coherence/coherence-cache-config":l
    ocal-scheme, "http://xmlns.oracle.com/coherence/coherence-cache-config":class-sc
    heme}' is expected.
            at com.tangosol.run.xml.XmlHelper.loadXml(XmlHelper.java:122)
            at com.tangosol.run.xml.XmlHelper.loadXml(XmlHelper.java:157)
            at com.tangosol.run.xml.XmlHelper.loadResourceInternal(XmlHelper.java:32
    2)
            ... 18 more
    Caused by: java.io.IOException: Exception occurred during schema validation:
    cvc-complex-type.2.4.a: Invalid content was found starting with element 'externa
    l-scheme'. One of '{"http://xmlns.oracle.com/coherence/coherence-cache-config":l
    ocal-scheme, "http://xmlns.oracle.com/coherence/coherence-cache-config":class-sc
    heme}' is expected.
            at com.tangosol.run.xml.SimpleParser.parseXml(SimpleParser.java:212)
            at com.tangosol.run.xml.SimpleParser.parseXml(SimpleParser.java:93)
            at com.tangosol.run.xml.SimpleParser.parseXml(SimpleParser.java:162)
            at com.tangosol.run.xml.SimpleParser.parseXml(SimpleParser.java:115)
            at com.tangosol.run.xml.XmlHelper.loadXml(XmlHelper.java:118)
            ... 20 more
    Caused by: org.xml.sax.SAXParseException: cvc-complex-type.2.4.a: Invalid conten
    t was found starting with element 'external-scheme'. One of '{"http://xmlns.orac
    le.com/coherence/coherence-cache-config":local-scheme, "http://xmlns.oracle.com/
    coherence/coherence-cache-config":class-scheme}' is expected.
            at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.createSAX
    ParseException(ErrorHandlerWrapper.java:195)
            at com.sun.org.apache.xerces.internal.util.ErrorHandlerWrapper.error(Err
    orHandlerWrapper.java:131)
            at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(
    XMLErrorReporter.java:384)
            at com.sun.org.apache.xerces.internal.impl.XMLErrorReporter.reportError(
    XMLErrorReporter.java:318)
            at com.sun.org.apache.xerces.internal.impl.xs.XMLSchemaValidator$XSIErro
    rReporter.reportError(XMLSchemaValidator.java:417)
            at com.sun.org.apache.xerces.internal.impl.xs.XMLSchemaValidator.reportS
    chemaError(XMLSchemaValidator.java:3182)
            at com.sun.org.apache.xerces.internal.impl.xs.XMLSchemaValidator.handleS
    tartElement(XMLSchemaValidator.java:1806)
            at com.sun.org.apache.xerces.internal.impl.xs.XMLSchemaValidator.startEl
    ement(XMLSchemaValidator.java:705)
            at com.sun.org.apache.xerces.internal.impl.XMLNSDocumentScannerImpl.scan
    StartElement(XMLNSDocumentScannerImpl.java:400)
            at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImp
    l$FragmentContentDriver.next(XMLDocumentFragmentScannerImpl.java:2756)
            at com.sun.org.apache.xerces.internal.impl.XMLDocumentScannerImpl.next(X
    MLDocumentScannerImpl.java:648)
            at com.sun.org.apache.xerces.internal.impl.XMLNSDocumentScannerImpl.next
    (XMLNSDocumentScannerImpl.java:140)
            at com.sun.org.apache.xerces.internal.impl.XMLDocumentFragmentScannerImp
    l.scanDocument(XMLDocumentFragmentScannerImpl.java:511)
            at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(X
    ML11Configuration.java:808)
            at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(X
    ML11Configuration.java:737)
            at com.sun.org.apache.xerces.internal.jaxp.validation.StreamValidatorHel
    per.validate(StreamValidatorHelper.java:144)
            at com.sun.org.apache.xerces.internal.jaxp.validation.ValidatorImpl.vali
    date(ValidatorImpl.java:111)
            at javax.xml.validation.Validator.validate(Validator.java:127)
            at com.tangosol.run.xml.SaxParser.validateXsd(SaxParser.java:236)
            at com.tangosol.run.xml.SimpleParser.parseXml(SimpleParser.java:206)
    So I think that if can off JVM heap Memory used in the Near-Cache front-tier?
    Or can help how to config the  off JVM heap Memory used in the Near-Cache front-tier.
    Thanks.

    Only local-scheme and class-scheme can be used in the front-scheme of a near cache. 
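
    For example, a front-scheme along these lines (sizes illustrative) passes schema validation:

    <front-scheme>
      <local-scheme>
        <eviction-policy>HYBRID</eviction-policy>
        <high-units>10000</high-units>
      </local-scheme>
    </front-scheme>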

  • Native Memory Area

    JVM Memory = Heap + Non-Heap
    I have configured JVM Memory as 1.5 GB. (OS is Windows) Perm Gen as 74 MB. In between i get "unable to create native threads" issue. I understand that a process in Windows can use a maximum memory of 2 GB.
    1. Is the NATIVE MEMORY space part of Non-Heap? Or is it the remaining of (2GB - (HEAP+NON_HEAP)) ?
    2. Is there a way to get the NATIVE MEMORY SPACE used by the java instance?

    I doubt the answers to any of those matter to your problem.
    If you have the heap maxed out and you are getting that error then you are probably just creating too many threads.
    If you are doing that on purpose then you need to redesign.
    If you are doing it accidentally then you need to find the bug that keeps the threads active and fix it.
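
    One way to confirm that (assuming Java 5 or later) is to watch the live thread count over time; a number that climbs steadily points to threads being created and never ending:

    // Live and peak thread counts for this JVM.
    java.lang.management.ThreadMXBean mx =
            java.lang.management.ManagementFactory.getThreadMXBean();
    System.out.println("live threads=" + mx.getThreadCount()
            + " peak=" + mx.getPeakThreadCount());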

  • Heap dumps on very large heap memory

    We are experiencing memory leak issues with one of our applications deployed on JBoss (Sun JVM 1.5, Win32 OS). The application is already memory-intensive and consumes the maximum heap (1.5 GB) allowed for a 32-bit JVM on Win32.
    This leaves very little memory for the heap dump, and the JVM crashes with a "malloc error" whenever we try adding the heap dump flag (-agentlib:).
    Has anyone faced a scenario like this?
    Alternatively, for investigation purposes, we are trying to deploy it on Windows x64, but the vendor advises running only on a 32-bit JVM. Here are my questions:
    1) Can we run a 32-bit JVM on Windows x64? Even if we can, can I allocate more than 2 GB of heap memory?
    2) I don't see the rationale for why we cannot run on a 64-bit JVM; Java programs are supposed to be 'platform-independent', and the application, in the form of bytecode, should run no matter whether it is a 32-bit or 64-bit JVM.
    3) Do we have any better tools (other than HPROF heap dumps) to analyze memory leaks? We tried using profiling tools, but they too fail because of the very limited memory available.
    Any help is really appreciated! :-)
    Anush

    anush_tv wrote:
    1) Can we run 32bit JVM on Windows 64? Even if we run, can i allocate more than 2 GB for heap memory?
    Yes, but you're limited to 2GB like any other 32-bit process.
    2) I dont see the rational why we cannot run on 64bit JVM - because, JAVA programs are supposed to be 'platform-independent' and the application in form of byte code should be running no matter if it is a 32bit or 64-bit JVM?
    It's probably related to JBoss itself, which is likely using native code. I don't have experience with JBoss though.
    3) Do we have any other better tools (except HPROF heapdumps) to analyze memory leaks - we tried using profiling tools but they too fail becos of very few memory available.
    You could try "jmap", which can dump the heap.

  • Java.lang.reflect.InvocationTargetException and OutOfMemoryError non heap

    Hi,
    After Tomcat has been running well for 3-4 days with 75 users, we suddenly face an InvocationTargetException caused by an OutOfMemoryError for 3-4 users at times. The JAVA_OPTS options are configured correctly, and we observe no heap errors. Heap utilization is only 60%, but we found that the CodeCache is reaching its maximum. Since the code cache is non-heap, it may not be what raises the OutOfMemoryError. Currently we restart the server every day outside production hours to avoid a sudden exception in production. The following is the exception we received; any suggestions for finding the root cause are welcome.
    We are using XForms XSLT and XPLs in which our Spring-integrated, dynamically compiled Java code is called.
    ep 11, 2009 8:38:00 AM org.apache.jk.common.ChannelSocket processConnection
    WARNING: processCallbacks status 2
    08:38:00,202 WARN [PortletLocalServiceImpl:143] Portlet not found for liferay.com preferencetest_WAR_ops
    08:38:00,233 WARN [PortletLocalServiceImpl:143] Portlet not found for liferay.com preferencetest_WAR_ops
    java.lang.reflect.InvocationTargetException at sun.reflect.GeneratedMethodAccessor2049.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.orbeon.oxf.processor.SimpleProcessor$1.readImpl(SimpleProcessor.java:70)
    at org.orbeon.oxf.processor.ProcessorImpl$6.read(ProcessorImpl.java:995)
    at org.orbeon.oxf.processor.ProcessorImpl$ProcessorOutputImpl.read(ProcessorImpl.java:1178)
    at org.orbeon.oxf.processor.JavaProcessor$1.readImpl(JavaProcessor.java:66)
    at org.orbeon.oxf.processor.ProcessorImpl$6.read(ProcessorImpl.java:995)
    at org.orbeon.oxf.processor.ProcessorImpl$ProcessorOutputImpl.read(ProcessorImpl.java:1178)
    at org.orbeon.oxf.processor.ProcessorImpl.readInputAsSAX(ProcessorImpl.java:350)
    at org.orbeon.oxf.processor.pipeline.AggregatorProcessor.access$100(AggregatorProcessor.java:38)
    at org.orbeon.oxf.processor.pipeline.AggregatorProcessor$1.readImpl(AggregatorProcessor.java:93)
    at org.orbeon.oxf.processor.ProcessorImpl$6.read(ProcessorImpl.java:995)
    at org.orbeon.oxf.processor.ProcessorImpl.readInputAsDOM4J(ProcessorImpl.java:368)
    at org.orbeon.oxf.processor.ProcessorImpl.readInputAsDOM4J(ProcessorImpl.java:387)
    at com.expeditor.processors.SingleTaskData.generateData(SingleTaskData.java:85)
    at sun.reflect.GeneratedMethodAccessor2048.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
    at java.lang.reflect.Method.invoke(Method.java:597)
    at org.orbeon.oxf.processor.SimpleProcessor$1.readImpl(SimpleProcessor.java:70)
    at org.orbeon.oxf.processor.ProcessorImpl$6.read(ProcessorImpl.java:995)
    at org.orbeon.oxf.processor.ProcessorImpl$ProcessorOutputImpl.read(ProcessorImpl.java:1178)
    at org.orbeon.oxf.processor.JavaProcessor$1.readImpl(JavaProcessor.java:66)
    at org.orbeon.oxf.pipeline.InitUtils.runProcessor(InitUtils.java:95)
    at org.orbeon.oxf.webapp.ProcessorService.service(ProcessorService.java:96)
    at org.orbeon.oxf.portlet.OPSPortletDelegate.forward(OPSPortletDelegate.java:253)
    at org.orbeon.oxf.externalcontext.PortletToExternalContextRequestDispatcherWrapper.forward(PortletToExternalContextRequestDispatcherWrapper.java:35)
    at org.orbeon.oxf.xforms.XFormsSubmissionUtils.openOptimizedConnection(XFormsSubmissionUtils.java:108)
    at org.orbeon.oxf.xforms.XFormsModelSubmission.performDefaultAction(XFormsModelSubmission.java:768)
    at org.orbeon.oxf.xforms.XFormsContainingDocument.dispatchEvent(XFormsContainingDocument.java:1283)
    at org.apache.jk.common.HandlerRequest.invoke(HandlerRequest.java:282)
    at org.apache.jk.common.ChannelSocket.invoke(ChannelSocket.java:754)
    at org.apache.jk.common.ChannelSocket.processConnection(ChannelSocket.java:684)
    at org.apache.jk.common.ChannelSocket$SocketConnection.runIt(ChannelSocket.java:876)
    at org.apache.tomcat.util.threads.ThreadPool$ControlRunnable.run(ThreadPool.java:684)
    at java.lang.Thread.run(Thread.java:619)
    Caused by: java.lang.OutOfMemoryError
    at java.lang.Class.getDeclaredMethods0(Native Method)
    at java.lang.Class.privateGetDeclaredMethods(Class.java:2427)
    at java.lang.Class.getDeclaredMethods(Class.java:1791)
    at org.codehaus.janino.ReflectionIClass.getDeclaredIMethods2(ReflectionIClass.java)
    at org.codehaus.janino.IClass.getDeclaredIMethods(IClass.java)
    at org.codehaus.janino.IClass.getDeclaredIMethods(IClass.java)
    at org.codehaus.janino.UnitCompiler.getIMethods(UnitCompiler.java)
    at org.codehaus.janino.UnitCompiler.findIMethod(UnitCompiler.java)
    at o

    Hi,
    we have observed memory usage with Probe in Tomcat and found no issue with the non-heap PermGen space. We generate some dynamic classes, around 30 for the entire application. It works for 3 days; in Probe we also found that PermGen space is gradually decreasing.
    When a PermGen OutOfMemoryError occurred (we reduced the size and tested), the logs contained an error stating it was an OutOfMemoryError because PermGen space was not enough.
    But the CodeCache is reaching its maximum as observed in Probe, and the error message is as above without heap/PermGen/other details.
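
    If the code cache really is the pool that fills up, its size is controlled by a separate VM option, independent of -Xmx and of PermGen (the 128m value is only an example; add it to however JAVA_OPTS is set for your Tomcat):

    JAVA_OPTS="$JAVA_OPTS -XX:ReservedCodeCacheSize=128m"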

  • Can I increase heap memory without specify any class or jar file??

    Hi,
    I tried to increase my heap memory in this way :
    java -Xms256m -Xmx256m
    but I got an error ... it seems that I must specify a Java class or a .jar file ...
    This is the error :
    Usage: java [-options] class [args...]
    (to execute a class)
    or java [-options] -jar jarfile [args...]
    (to execute a jar file)
    where options include:
    -client to select the "client" VM
    -server to select the "server" VM
    -hotspot is a synonym for the "client" VM [deprecated]
    The default VM is client.
    -cp <class search path of directories and zip/jar files>
    -classpath <class search path of directories and zip/jar files>
    A ; separated list of directories, JAR archives,
    and ZIP archives to search for class files.
    -D<name>=<value>
    set a system property
    -verbose[:class|gc|jni]
    enable verbose output
    -version print product version and exit
    -version:<value>
    require the specified version to run
    -showversion print product version and continue
    -jre-restrict-search | -jre-no-restrict-search
    include/exclude user private JREs in the version search
    -? -help print this help message
    -X print help on non-standard options
    -ea[:<packagename>...|:<classname>]
    -enableassertions[:<packagename>...|:<classname>]
    enable assertions
    -da[:<packagename>...|:<classname>]
    -disableassertions[:<packagename>...|:<classname>]
    disable assertions
    -esa | -enablesystemassertions
    enable system assertions
    -dsa | -disablesystemassertions
    disable system assertions
    -agentlib:<libname>[=<options>]
    load native agent library <libname>, e.g. -agentlib:hprof
    see also, -agentlib:jdwp=help and -agentlib:hprof=help
    -agentpath:<pathname>[=<options>]
    load native agent library by full pathname
    -javaagent:<jarpath>[=<options>]
    load Java programming language agent, see java.lang.instrument
    -splash:<imagepath>
    show splash screen with specified image
    can I increase heap memory without specifying any class or jar file?
    thx

    chiara wrote:
    Hi,
    I tried to increase my heap memory in this way :
    java -Xms256m -Xmx256m
    but I got an error ... it seems that I must specify a Java class or a .jar file ...
    This is the error :
    Usage: java [-options] class [args...]
    (to execute a class)
    or java [-options] -jar jarfile [args...]
    (to execute a jar file)
    can I increase heap memory without specify any class or jar file??The job of java.exe is to execute java bytecode.
    What is it supposed to do with your request to use 256m of memory for heap
    when you are not giving it a class or a jar to run?
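
    So the memory options only take effect together with something to run, for example (MyMainClass and myapp.jar are placeholders):

    java -Xms256m -Xmx256m MyMainClass
    java -Xms256m -Xmx256m -jar myapp.jar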

  • UCCX 7 Heap Memory Usage Exceeded Error

    UCCX 7.0.(1) SR5
    Getting the following error when updating or adding new script applications:
    "It is not recommended to update the application as Engine heap memory usage exceeded configured threshold. Click OK to continue and Cancel to exit."
    Apparently this is an alert that was built into SR4 and is configurable under the System Parameters.
    Does anyone have information on what processes use the heap memory in UCCX or how to monitor the usage?

    As Tom can attest to by now, this is something of an iceberg with big sharp edges below the surface.
    The Java heap is fixed at 256MB on CCX. The Java heap is used by Tomcat as execution memory. In addition to this, applications, scripts, and other repository data is loaded into the heap at runtime. Depending on your environment, you may be approaching the limits of the heap, which cannot be changed. If the heap size is reached, it will be dumped and impact calls.
    What have you been doing as of late on your CCX server? How many applications and scripts do you have? Are any of these using XML files extensively?
    Note there is also a possible bug where the MIVR engine does not properly release all objects loaded into the heap at the end of a script execution leading to a memory leak of sorts. The discussion [debate] over this behavior is continuing. As of this week, it may be represented under
    CSCte49231. If it is, this may qualify as the most poorly described defect ever.

  • OAS Heap memory issue: An error "java.lang.OutOfMemoryError: GC overhead

    OAS - 10.1.3.4.0
    We are running out of Heap memory and seeing lots of full GC and out of memory events
    Verbose GC is on.
    Users don't know what they are doing to cause this
    We have 30-40 users per server and 1.5 GB heap memory allocated
    There are no other applications on the machine, only the PRD instance with 1.5 GB allocated to the JVM. We do not have any issue with memory on the server, and we could increase the heap, but we don't want to go over 1.5 GB since that is what I understood to be the high end of what is recommended. We only have 30-40 users on each machine. There are 8 servers, and on a typical heavy-usage day we may have one or two machines that show the out-of-memory errors or continuous full GCs in the logs. When this occurs, the phones light up with the people on that machine experiencing slowness.
    Below is an example of what we see in a file created in the OPMN log folder on the JAS server when this occurs. I think this is the log created when verbose GC is turned on. I can send you the full log or anything else you need. Thanks
    1194751K->1187561K(1365376K), 4.6044738 secs]
    java.lang.OutOfMemoryError: GC overhead limit exceeded
    Dumping heap to java_pid10644.hprof ...
    [Full GC 1194751K->1188321K(1365376K), 4.7488200 secs]
    Heap dump file created [1326230812 bytes in 47.602 secs]
    [Full GC 1194751K->1177641K(1365376K), 5.6128944 secs]
    [Full GC 1194751K->986239K(1365376K), 4.6376179 secs]
    [Full GC 1156991K->991906K(1365376K), 4.5989155 secs]
    [Full GC 1162658K->1008331K(1365376K), 4.1139016 secs]
    [Full GC 1179083K->970476K(1365376K), 4.9670050 secs]
    [GC 1141228K->990237K(1365376K), 0.0561096 secs]
    [GC 1160989K->1012405K(1365376K), 0.0920553 secs]
    [Full GC 1012405K->1012274K(1365376K), 4.1170216 secs]
    [Full GC 1183026K->1032000K(1365376K), 4.4166454 secs]
    [Full GC 1194739K->1061736K(1365376K), 4.4009954 secs]
    [Full GC 1194739K->1056175K(1365376K), 5.1124431 secs]
    [Full GC 1194752K->1079807K(1365376K), 4.5160851 secs]
    in addition to the 'overhead limit exceded' we also will see :
    [Full GC 1194751K->1194751K(1365376K), 4.6785776 secs]
    [Full GC 1194751K->1188062K(1365376K), 5.4413659 secs]
    [Full GC 1194751K->1194751K(1365376K), 4.5800033 secs]
    [Full GC 1194751K->1194751K(1365376K), 4.4951213 secs]
    [Full GC 1194751K->1194751K(1365376K), 4.5227857 secs]
    [Full GC 1194751K->1171773K(1365376K), 5.5696274 secs]
    11/07/25 11:07:04 java.lang.OutOfMemoryError: Java heap space
    [Full GC 1194751K->1183306K(1365376K), 4.5841678 secs]
    [Full GC 1194751K->1184329K(1365376K), 4.5469164 secs]
    [Full GC 1194751K->1184831K(1365376K), 4.6415273 secs]
    [Full GC 1194751K->1174738K(1365376K), 5.3647290 secs]
    [Full GC 1194751K->1183878K(1365376K), 4.5660217 secs]
    [Full GC 1194751K->1184651K(1365376K), 4.5619460 secs]
    [Full GC 1194751K->1185795K(1365376K), 4.4341158 secs]

    There's an Oracle support note with a very similar MO :
    WebLogic Server: Getting "java.lang.OutOfMemoryError: GC overhead limit exceeded" exception with Sun JDK 1.6 [ID 1242994.1]
    If I search for "java.lang.OutOfMemoryError: GC overhead" on Oracle Support it returns at least 12 documents
    Might be bug 6065704. Search Oracle support for this bug number.
    Best Regards
    mseberg
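
    For the next occurrence it can also help to have the VM record what happened on its own; a typical set of diagnostic options is shown below (the dump path is illustrative, and judging by the java_pid10644.hprof line the dump-on-OOM flag may already be in place):

    -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps
    -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/dumps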

  • Allocated heap memory goes up even when there is enough free memory

    Hi,
    Our Java application's memory usage keeps growing. Further analysis of the heap with JProbe shows that the allocated heap goes up even when there is enough free memory available in the heap.
    When the process started, the allocated heap was around 50MB and the memory used was around 8MB. After a few hours, the in-use memory remained at 8MB (with a slight increase in KBs), but the allocated memory went up to 70MB.
    We are using JVM 1.5_10. What could be the reason for the heap allocation going up even when there is enough free memory available in the heap?
    -Rajesh.
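
    One setting that influences this behaviour: the HotSpot collector expands the committed heap whenever the free fraction drops below -XX:MinHeapFreeRatio and shrinks it only when free space exceeds -XX:MaxHeapFreeRatio, so the allocated size can climb even while usage stays flat. Illustrative values (MyApp is a placeholder):

    java -Xms50m -Xmx256m -XX:MinHeapFreeRatio=20 -XX:MaxHeapFreeRatio=40 MyApp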

    Hi Eric,
    Please check if there is any error or warning in the Event Viewer based on the data time?
    If there is any error, please post the event ID to help us to troubleshoot.
    Best Regards,
    Anna

  • Confusion on Java heap memory and WLS_FORMS

    Hello all,
    Background first:
    Oracle Forms/Reports 11.1.2 64-bit
    WebLogic Server 10.3.6
    JDK 1.6 update 37 64-bit
    Microsoft Windows 2008 R2
    Using nodemanager to start/stop managed servers
    After having read all of the documentation and searched both this forum and the Internet for advice, I'm still utterly confused about the best way to make use of memory on the server (the server I'm working on now has 8GB). The two trains of thought that I have discovered in my search:
    1). Don't change the Java heap size at all (stick with the defaults) and just create additional managed servers on the same machine.
    2). Increase the Java heap size for WLS_FORMS
    Having said that, here are my questions:
    A). What is the best-practices approach (#1 or #2)?
    B). If it's #2, what's the approved way to increase the heap size? I have tried adding -Xms and -Xmx arguments to the WLS server start arguments in the WLS console. These are applied when the managed server is started (confirmed in the log file), but because of the way WLS_FORMS is started, there are more -Xms and -Xmx arguments applied after mine, and Java picks the last one mentioned if there are duplicates.
    First update: Question #2 seems to be answered by support note 1260074.1 (the one place I hadn't yet looked)
    Thanks for any insight you can provide. If there's a document I've missed somewhere, I'm happy to be told where it is and will read and summarize findings here.
    Regards,
    John

    John,
    Let me try to comment on each of yours:
    1). We had been getting some "Apache unable to contact the forms server" type errors (the users were seeing the "Failure of server APACHE bridge" error). The log files showed nothing of interest. I increased the memory allocated using setDomainEnv.cmd, and the error seems to have gone away. Yes, I know that it was a shotgun approach, trying something without really having a reason to do so, but it seems to have helped. Edit: Now that I review the OHS logs instead of the WLS_FORMS logs, I have found log messages, which leads me to Doc 1380762.1, which tells me I need a patch. DOH. And, oh crikey, Forms 11.1.2.1 is out, it came out shortly after we downloaded 11.1.2.0 to create these environments. Good news/bad news kind of thing...
    The Apache Bridge error is fairly straightforward if you understand what it is telling you. It is an error generated by mod_wl_ohs, which is owned by OHS (Apache). This module is responsible for the connection between OHS and WLS. The Apache Bridge error means that OHS (mod_wls) was unable to get a response from the WLS managed server it was calling. Basically it was unable to cross the bridge ;) The cause could be anything from the managed server is not running, to the managed server is over tasked, or there is a network configuration issue and the managed server simply didn't hear OHS calling.
    This is all discussed in MOS note 1304095.1
    As for 11.1.2.1, this can be installed fresh or as a patch over 11.1.2.0. So for machines that don't currently have anything installed, you can go directly to 11.1.2.1 without having to install 11.1.2.0 first.
    2). As tony.g suggested, we are looking for what we should do to solve the "I have n servers with x GB of RAM, what should I do to the out-of-the-box configuration of Forms for stability" question.
    As I mentioned, there really are no "Forms" specific tweaks related to how much RAM your machine has. The only exception to this is (although somewhat indirect) to use JVM Pooling. JVM Pooling can reduce the size of each runtime process's memory footprint by moving its java calls to the jvm pool then sharing common requests with other running runtimes. Memory usage by OHS or the WLS managed server really has little to do directly with Forms. Specifically to the managed server, from a Forms point of view, I would not expect the memory cost of WLS_FORMS to increase much because of load. I expect it to increase as concurrent load increases, but I would not expect it to be significant. If I had to guess, seeing an increase of 1m or less per user would not surprise me (this is just a guess - I don't know what the expected values would be). If we were to use our (Oracle) older scalability guidelines, typically we would have suggested that you should consider about 100 sessions per 1 jvm for best performance. Given that v11 uses a newer java version and scalability is better today, I suspect you can easily scale to a few hundred (e.g. 300) or so before performance drops off. Beyond that, the need to add more managed servers would likely be necessary.
    This is discussed in MOS note 989118.1
    3). HA is important to us, so we are implementing a cluster of Forms/Reports servers with an LBR in front of it. I have read in the docs on clustering, cloning a managed server, and via Support, how to increase the heap memory for the WLS_FORMS server. My thought process was "if Oracle gives me instructions on how to increase heap memory and how to clone managed servers, there must be a scenario in which doing so provides benefit." I'm trying to understand the scenarios in which we would do either of those activities.
    Refer to the note I mentioned above. Generally, if you limit the number of concurrent sessions to less than around 300-400, I would think the default settings should be fine. If you think you would like to go beyond 300 or 400 per managed server then likely you will need to increase the max heap for the managed server. Again, refer to the note I mentioned previously.
    Also see MOS note 1260074.1
    I am aware of the JVM pooling (yes we do call out to Reports) - I've yet to implement this, but it's on my to-do list.
    This is discussed in the Forms Deployment Guide (http://docs.oracle.com/cd/E38115_01/doc.111210/e24477/jvm.htm)
    Hope that helps ;)
    .
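
    For reference, the setDomainEnv.cmd route mentioned above usually comes down to setting the memory arguments there; USER_MEM_ARGS overrides the default -Xms/-Xmx for servers started through the domain scripts (values illustrative):

    rem in setDomainEnv.cmd
    set USER_MEM_ARGS=-Xms512m -Xmx1024m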

  • Mixing ECC & non-ECC memory

    I just inherited the care of a Dual G5 server. I noticed that there are 2-256 sticks of ECC and 2-256 sticks of non-ECC memory.
    Is this an issue? Or will they work fine together?

    Hi, lobsang-mack, and a Warm Welcome to Apple Discussions and the G5 Power Mac forum!
    It should not cause problems. Presumably the server is working...? ECC is intended for servers and "mission-critical" applications.
    Some explanation of ECC here
    http://www.crucial.com/crucial/pvtcontent/memorytype.asp?model=&memtype=CHOOSE#ecc

  • Find heap memory Size for Web Intelligence processing server in BO 4.0

    Hi All ,
    We need to gather data for sizing inputs. For the Adaptive Processing Server, we can find this by going to CMC > Servers > APS > Properties and checking the value of the -Xmx parameter. Could you please tell me how to find the maximum heap memory allocated to a Web Intelligence Processing Server in BO 4.0, since for the Webi server this parameter is not maintained?
    Regards ,
    Abhinav

    Hi Abhinav,
    The maximum threshold is a value which may be reached at peak usage; the Webi Processing Server cannot occupy memory beyond this value at any time.
    In your situation, 9 Webi Processing Servers with 16 GB of RAM on the host is not recommended. Consider the situation with 16 GB of total host RAM:
    4 GB should be left for the OS
    Tomcat will need a minimum of 2 GB for 200 users
    So you are left with 10 GB of RAM for all BO services
    9 Webi Processing Servers with a 6 GB threshold each will not work here
    For this configuration, two Webi Processing Servers with the default threshold should be running on a single host.
    Regards,
    Hrishikesh

  • Threaded inner classes & heap memory exhaustion

    (_) how can i maximize my threading without running out of
    heap memory?
    push it to the limit, but throttle back before an
    java.lang.OutOfMemoryError.
    (_) within 1 threaded class ThreadClass, i have two threaded inner classes. for each instance of ThreadClass i only
    start one instance of each inner class.
    and, i start hundreds of ThreadClass, but not until the previously running ThreadClass object exits, so only one should be running at any given time.
    so, what about threaded inner classes?
    are they good? bad? cause "OutOfMemoryErrors"?
    are those inner threads not dying?
    what are common causes of:
    java.lang.OutOfMemoryError: java heap space?
    my program runs for about 5-minutes, then
    bails with the memory error.
    how can i drill down and see what
    is eating-up all my memory?
    thanks.

    A Thread class is not the same as a thread of
    execution. Those inner class based threads of
    execution are not dying.
    maybe. but this is the way i test a thread's life:
    public void run() {
    System.out.println("thread start");
    System.out.println("thread dies and release memory");
    }
    for each inner thread, and the outer thread, this approach for
    testing thread life reveals that they die.
    Why don't you use a thread pool?
    ok. i will think about how to do this.
    >
    If not, you need to ensure those inner threads have
    exited and completed.
    what is a 100% sure check to guarantee a thread exits other than
    the one i use above?
    note:
    the outer thread is running on a remote host, and the inner threads
    are running locally. here are the details:
    public class BB implements Runnable, FinInterface {
      public void run() {
        // do some work on the remote machine
      }
      private void startResultsHandler(OisXoos oisX) {
         ResultsHandler rh = new ResultsHandler(oisX);
         rh.start();
      }
      public void startDataProxy(OisXoos oisX, String query) {
         DataProxy dp = new DataProxy(oisX, query);
         dp.start();
      }
      public class ResultsHandler extends Thread {
           // runs locally; waits for results from servers
           public void run() {
               ObjectInputStream ois = new ObjectInputStream(oisX.input);
               Set result = (Set) ois.readObject();
           }
      }  // ____ class :: _ ResultsHandler _ :: class ____
      public class DataProxy extends Thread {
           // runs locally; performs db queries on behalf of servers
           public void run() {
               ObjectOutputStream oos = new ObjectOutputStream(oisX.output);
               while (moreData) {
                   .... // sql queries
                   oos.writeObject(data);
               }
               startResultsHandler(oisX);
           }
      } // _____ class  :: _ DataProxy _ :: class _____
    }
    now, the BB class is not started locally.
    the inner threads are started locally to both service data requests
    by the BB thread as well as wait for its results.
    (_) so, maybe the inner threads cannot exit (but they sure look
    like they exit) until their parent BB thread exits.
    (_) yet, those inner threads have no knowledge that the BB
    thread is running.
    externalizing those inner thread classes will put 2-weeks of work
    in the dust bin. i want to keep them internal.
    thanks.
    here is the piece of code that controls everything:
    while (moreData) {
      FinObjects finObj = new BB();
      String symb = (String) data_ois.readObject();
      OisXoos oisX = RSAdmin.getServer();
      oisX.xoos.writeObject(finObj);
      finObj.startDataProxy(oisX, symb);   // matches startDataProxy(OisXoos, String) above
    }
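
    Regarding the thread pool suggestion above, a minimal sketch with java.util.concurrent (available since 1.5; the loop body is a stand-in for the BB work items):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    ExecutorService pool = Executors.newFixedThreadPool(8);   // bounded number of worker threads, reused
    while (moreData) {
        pool.execute(new BB());                               // queue the work instead of one new Thread per item
    }
    pool.shutdown();                                          // stop accepting work; queued tasks still finish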
