Concurrent access to Berkeley Db using c++

Hi
I'm trying to implement a concurrent persistent queue using Berkeley DB. As a starter I tried to make two processes which both append to the DB:
#include <unistd.h>
#include <iostream>
#include <sstream>
#include <db_cxx.h>

// Persistent queue backed by a Berkeley DB RECNO database.
// Opens a transactional environment in /tmp/db; records are appended
// with auto-assigned record numbers.
class Queue : public DbEnv {
public:
    Queue() : DbEnv(0), db(0) {
        set_flags(DB_CDB_ALLDB, 1);
        // DB_REGISTER is required when several processes pass DB_RECOVER:
        // without it, a second process runs recovery on an environment the
        // first process is actively using, corrupting the shared region
        // files (this is the source of the random segfaults reported).
        open("/tmp/db",
             DB_INIT_LOCK |
             DB_INIT_LOG |
             DB_INIT_TXN |
             DB_INIT_MPOOL |
             DB_REGISTER |
             DB_RECOVER |
             DB_CREATE |
             DB_THREAD,
             0);
        db = new Db(this, 0);
        db->set_flags(DB_RENUMBER);
        db->open(NULL, "db", NULL, DB_RECNO,
                 DB_CREATE | DB_AUTO_COMMIT | DB_THREAD, 0);
    }

    virtual ~Queue() {
        db->close(0);
        delete db;
        close(0);
    }

protected:
    Db *db;
};

// Producer: appends one string per transaction.
class Enqueue : public Queue {
public:
    Enqueue() : Queue() { }
    virtual ~Enqueue() { }

    // Append s as a new record. Returns true on success; on failure the
    // transaction is aborted (if it was started) and false is returned.
    bool push(const std::string &s) {
        int res;
        DbTxn *txn = NULL; // initialized so the catch blocks can test it safely
        try {
            txn_begin(NULL, &txn, DB_TXN_SYNC | DB_TXN_WAIT);
            // With DB_APPEND, Berkeley DB writes the newly allocated recno
            // back into the key, so the key must be DB_DBT_USERMEM with
            // ulen large enough for a db_recno_t.
            db_recno_t k0[4];
            k0[0] = 0;
            Dbt val((void *)s.c_str(), s.length());
            Dbt key((void *)&k0, sizeof(k0[0]));
            key.set_ulen(sizeof(k0));
            key.set_flags(DB_DBT_USERMEM);
            res = db->put(txn, &key, &val, DB_APPEND);
            if (res == 0) {
                txn->commit(0);
                return true;
            } else {
                std::cerr << "push failed: " << res << std::endl;
                txn->abort();
                return false;
            }
        } catch (const DbException &e) { // catch by const ref, never by value
            std::cerr << "DB What()" << e.what() << std::endl;
            if (txn) txn->abort(); // txn_begin itself may have thrown
            return false;
        } catch (const std::exception &e) {
            std::cerr << "What()" << e.what() << std::endl;
            if (txn) txn->abort();
            return false;
        } catch (...) {
            std::cerr << "Unknown error" << std::endl;
            if (txn) txn->abort();
            return false;
        }
    }
};

using namespace std;

int main(int argc, const char *argv[]) {
    fork(); // create two independent processes that both append
    Enqueue e;
    stringstream ss;
    for (int i = 0; i < 10; i++) {
        ss.str("");
        ss << "asdf" << i;
        cout << ss.str() << endl;
        if (!e.push(ss.str()))
            break;
    }
    return 0;
}
Compiling it:
$ g++ test.cxx -I/usr/include/db4.8 -ldb_cxx-4.8
Create the db-dir
$ mkdir /tmp/db
And when I run it I get all kinds of errors (segmentation faults, allocation errors, and sometimes it actually works)
I'm sure that I have missed some locking, but I just do not know how to do it. So, any hints and/or suggestions to fix this are most welcome.
Best regards
Allan W. Nielsen

Okay, I think I found a way to do this. It is not pretty, and I think it can be done more easily...
The application is a call-home process, where the producer adds data and the consumer tries to send it home. If the consumer fails to send it home, it must try again. The database must not block the producer while the consumer is trying to drain data.
The code has a file lock, and will only allow one consumer process.
Here is the code:
#include <db_cxx.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <vector>
#include <string>
#include <boost/interprocess/sync/file_lock.hpp>

// Shared Berkeley DB environment + RECNO database in /tmp/db.
// sync selects durable (DB_TXN_NOSYNC off) vs fast, non-durable commits.
class Queue : public DbEnv {
public:
    Queue(bool sync) : DbEnv(0), db(0) {
        set_flags(DB_CDB_ALLDB, 1);
        if (sync)
            set_flags(DB_TXN_NOSYNC, 0);
        else
            set_flags(DB_TXN_NOSYNC, 1);
        // DB_REGISTER + DB_RECOVER: only the first process actually runs
        // recovery, so several processes can share the environment safely.
        open("/tmp/db",
             DB_INIT_LOCK |
             DB_INIT_LOG | DB_INIT_TXN | DB_INIT_MPOOL |
             DB_REGISTER | DB_RECOVER | DB_CREATE | DB_THREAD,
             0);
        db = new Db(this, 0);
        db->set_flags(DB_RENUMBER);
        db->open(NULL, "db", NULL, DB_RECNO,
                 DB_CREATE | DB_AUTO_COMMIT | DB_THREAD, 0);
    }

    virtual ~Queue() {
        db->close(0);
        delete db;
        close(0);
    }

protected:
    Db *db;
};

// RAII wrapper for DbTxn: the destructor aborts the transaction unless
// commit() or abort() was called explicitly.
struct Transaction {
    Transaction() : t(0) { }

    // Begin a transaction in dbenv; returns false (and logs) on failure.
    bool init(DbEnv *dbenv) {
        try {
            dbenv->txn_begin(NULL, &t, 0);
        } catch (const DbException &e) { // catch by const ref, never by value
            std::cerr << "DB What()" << e.what() << std::endl;
            return false;
        } catch (const std::exception &e) {
            std::cerr << "What()" << e.what() << std::endl;
            return false;
        } catch (...) {
            std::cerr << "Unknown error" << std::endl;
            return false;
        }
        return true;
    }

    ~Transaction() { if (t != 0) t->abort(); }
    void abort()   { t->abort(); t = 0; }
    void commit()  { t->commit(0); t = 0; }
    DbTxn *t;
};

// RAII wrapper for Dbc: the destructor closes the cursor unless close()
// was called explicitly.
struct Cursor {
    Cursor() : c(0) { }

    // Open a cursor on db inside transaction t; returns false on failure.
    bool init(Db *db, DbTxn *t) {
        try {
            db->cursor(t, &c, 0);
        } catch (const DbException &e) {
            std::cerr << "DB What()" << e.what() << std::endl;
            return false;
        } catch (const std::exception &e) {
            std::cerr << "What()" << e.what() << std::endl;
            return false;
        } catch (...) {
            std::cerr << "Unknown error" << std::endl;
            return false;
        }
        return true;
    }

    ~Cursor() { if (c != 0) c->close(); }
    void close() { c->close(); c = 0; }
    Dbc *c;
};

// Producer: appends one string per transaction.
class Enqueue : public Queue {
public:
    Enqueue(bool sync) : Queue(sync) { }
    virtual ~Enqueue() { }

    // Append s as a new record; the Transaction destructor aborts
    // automatically on any failure path.
    bool push(const std::string &s) {
        int res;
        Transaction transaction;
        if (!transaction.init(this))
            return false;
        try {
            // DB_APPEND writes the new recno back into the key, so the key
            // must be DB_DBT_USERMEM with room for a db_recno_t.
            db_recno_t k0[4];
            k0[0] = 0;
            Dbt val((void *)s.c_str(), s.length());
            Dbt key((void *)&k0, sizeof(k0[0]));
            key.set_ulen(sizeof(k0));
            key.set_flags(DB_DBT_USERMEM);
            res = db->put(transaction.t, &key, &val, DB_APPEND);
            if (res == 0) {
                transaction.commit();
                return true;
            } else {
                std::cerr << "push failed: " << res << std::endl;
                return false;
            }
        } catch (const DbException &e) {
            std::cerr << "DB What()" << e.what() << std::endl;
            return false;
        } catch (const std::exception &e) {
            std::cerr << "What()" << e.what() << std::endl;
            return false;
        } catch (...) {
            std::cerr << "Unknown error" << std::endl;
            return false;
        }
    }
};

// Create (or truncate) the lock file so boost::interprocess::file_lock
// has something to lock; returns its argument for inline use.
const char *create_file(const char *f) {
    std::ofstream _f;
    _f.open(f, std::ios::out);
    _f.close();
    return f;
}

// Consumer: reads a batch with pop(), then either commit() (deletes the
// batch inside a transaction) or abort() (forgets the batch so it will be
// re-read). A file lock allows only one consumer process at a time, so
// the producer is never blocked by a slow consumer.
class Dequeue : public Queue {
public:
    Dequeue(bool sync) :
        Queue(sync),
        lock(create_file("/tmp/db-test-pop.lock")),
        number_of_records_(0)
    {
        std::cout << "Trying to get exclusive access to database" << std::endl;
        lock.lock(); // blocks until no other consumer holds the lock
    }

    virtual ~Dequeue() { }

    // Read up to number_of_records records into records (without deleting
    // them — deletion happens in commit()). Returns false if nothing was
    // read or on error. Calling pop() again before commit()/abort() is a
    // usage error (see TODOs).
    bool pop(size_t number_of_records, std::vector<std::string> &records) {
        if (number_of_records_ != 0) // TODO, warning
            abort();
        Cursor cursor;
        records.clear();
        if (number_of_records_ != 0)
            abort(); // TODO, warning
        // Get a cursor outside any transaction: reading must not block the
        // producer.
        try {
            db->cursor(0, &cursor.c, 0);
        } catch (const DbException &e) {
            std::cerr << "DB What()" << e.what() << std::endl;
            abort();
            return false;
        }
        // Read the batch
        try {
            Dbt val;
            db_recno_t k0 = 0;
            Dbt key((void *)&k0, sizeof(k0));
            for (size_t i = 0; i < number_of_records; i++) {
                int get_res = cursor.c->get(&key, &val, DB_NEXT);
                if (get_res == 0)
                    records.push_back(std::string((char *)val.get_data(),
                                                  val.get_size()));
                else
                    break;
            }
            number_of_records_ = records.size();
            if (number_of_records_ == 0) {
                abort();
                return false;
            } else {
                return true;
            }
        } catch (const DbException &e) {
            std::cerr << "DB read/delete What() " << e.what() << std::endl;
            abort();
            return false;
        } catch (const std::exception &e) {
            std::cerr << "DB read/delete What() " << e.what() << std::endl;
            abort();
            return false;
        }
    }

    // Delete the records returned by the last pop(), inside a transaction.
    // Assumes no other consumer removed them meanwhile (guaranteed by the
    // file lock).
    bool commit() {
        if (number_of_records_ == 0)
            return true;
        Transaction transaction;
        Cursor cursor;
        if (!transaction.init(this))
            return false;
        if (!cursor.init(db, transaction.t))
            return false;
        // Read and delete
        try {
            Dbt val;
            db_recno_t k0 = 0;
            Dbt key((void *)&k0, sizeof(k0));
            for (size_t i = 0; i < number_of_records_; i++) {
                int get_res = cursor.c->get(&key, &val, DB_NEXT);
                if (get_res == 0)
                    cursor.c->del(0);
                else
                    break; // this is bad!
            }
            number_of_records_ = 0;
            cursor.close();
            transaction.commit();
            return true;
        } catch (const DbException &e) {
            std::cerr << "DB read/delete What() " << e.what() << std::endl;
            return false;
        } catch (const std::exception &e) {
            std::cerr << "DB read/delete What() " << e.what() << std::endl;
            return false;
        }
    }

    // Forget the last pop()ed batch; the records stay in the database and
    // will be returned by the next pop().
    void abort() {
        number_of_records_ = 0;
    }

private:
    boost::interprocess::file_lock lock; // one consumer process at a time
    size_t number_of_records_;           // size of the un-committed batch
    sigset_t orig_mask;
};

Similar Messages

  • Concurrent Access Licenses

    Post Author: pkumar
    CA Forum: Authentication
    I am getting the following error while trying to logon into CMC:
    All of your system's 0 Concurrent Access Licenses are in use at this time or your system's license key has expired. Try again later or contact your administrator to obtain additional licenses.
    I have 10 named user licenses for 180 days trial version.
    Using a
    1.> System DSN with Windows NT authentication
    2.> CMS server IP address
    3.> CMS server port number :6400
    While installing if I use a File DSN it does not allow me to move to the next step of Installable.
    Thank You
    PKumar

    Post Author: kconner
    CA Forum: Authentication
    Pkumar,
    Not sure if this answers your question but I would probably set up for your 10 users, set up a "generic" Crystal Enterprise user Name /PW combo rather than trying to Authenticate via LDAP/Active Directory; soemthing like company abbrev1 (no spaces) , pw1. See if that works, then move on to more complicated LDAP authen

  • Segment fault in concurrent access to BDB

    Hi, All
    I am extending a single thread BDB application to allow concurrent
    access to the DB from 2 threads, transaction is not needed.
    The environment is opened with the flag (DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK).
    Then I open a new DB under the same directory in each thread, and
    modify the DB simultaneously.  my expectation was that each thread
    will not conflict with each other and in turn improve the
    throughput. While the program constantly crashed because of segment
    fault in file (src/lock/lock.c:832, both threads crashed on this
    line).
    I inspected the back trace, the only suspecious thing I found is that
    the 2 threads are using the same locker object[1], so I'm wondering if
    anyone has similar experience implementing concurrent BDB
    applications? And what's possible cause of my problem and possible
    fixes? Thanks.
    [1]
    thread 1: #0  0x00002aaab3651a7f in __lock_get_internal (lt=0x13cbda0, sh_locker=0x2aab0119f258, flags=4, obj=0x1c55650, lock_mode=DB_LOCK_WRITE, timeout=0, lock=0x2aab449fe8a0)
        at ../src/lock/lock.c:832
    thread 2: #0  0x00002aaab3651a7f in __lock_get_internal (lt=0x13cbda0, sh_locker=0x2aab0119f258, flags=4, obj=0x13da640, lock_mode=DB_LOCK_WRITE, timeout=0, lock=0x2aab3ffc78a0)
        at ../src/lock/lock.c:832

    Hi, Mike
    Thanks for the timely reply!
    I referred to the Berkeley DB Transaction Guide, while the doc does not mention the consequence if you violate the scenario requirement. 
    1. I choose the default Data Store model, and my program will allow multiple writers, what's the expected behavior? 2 threads using the same locker compete different locks is supposed to segment fault?  I'm also curious about how a put(key, data) correspond to a locker, keys in the memory page map to the same locker?
    2. I try to enable concurrent reader & writer without transaction, while the guide assumes transaction is enable, is Data Store able to achieve this? Or at least Concurrent Data Store is required?
    Thank you very much!

  • How to synchronize concurrent access to static data in ABAP Objects

    Hi,
    1) First of all I mwould like to know the scope of static (class-data) data of an ABAP Objects Class: If changing a static data variable is that change visible to all concurrent processes in the same Application Server?
    2) If that is the case. How can concurrent access to such data (that can be shared between many processes) be controlled. In C one could use semaphores and in Java Synchronized methods and the monitor concept. But what controls are available in ABAP for controlling concurrent access to in-memory data?
    Many thanks for your help!
    Regards,
    Christian

    Hello Christian
    Here is an example that shows that the static attributes of a class are not shared between two reports that are linked via SUBMIT statement.
    *& Report  ZUS_SDN_OO_STATIC_ATTRIBUTES
    REPORT  zus_sdn_oo_static_attributes.
    DATA:
      gt_list        TYPE STANDARD TABLE OF abaplist,
      go_static      TYPE REF TO zcl_sdn_static_attributes.
    <i>* CONSTRUCTOR method of class ZCL_SDN_STATIC_ATTRIBUTES:
    **METHOD constructor.
    *** define local data
    **  DATA:
    **    ld_msg    TYPE bapi_msg.
    **  ADD id_count TO md_count.
    **ENDMETHOD.
    * Static public attribute MD_COUNT (type i), initial value = 1</i>
    PARAMETERS:
      p_called(1)  TYPE c  DEFAULT ' ' NO-DISPLAY.
    START-OF-SELECTION.
    <b>* Initial state of static attribute:
    *    zcl_sdn_static_attributes=>md_count = 0</b>
      syst-index = 0.
      WRITE: / syst-index, '. object: static counter=',
               zcl_sdn_static_attributes=>md_count.
      DO 5 TIMES.
    <b>*   Every time sy-index is added to md_count</b>
        CREATE OBJECT go_static
          EXPORTING
            id_count = syst-index.
        WRITE: / syst-index, '. object: static counter=',
                 zcl_sdn_static_attributes=>md_count.
    <b>*   After the 3rd round we start the report again (via SUBMIT)
    *   and return the result via list memory.
    *   If the value of the static attribute is not reset we would
    *   start with initial value of md_count = 7 (1+1+2+3).</b>
        IF ( p_called = ' '  AND
             syst-index = 3 ).
          SUBMIT zus_sdn_oo_static_attributes EXPORTING LIST TO MEMORY
            WITH p_called = 'X'
          AND RETURN.
          CALL FUNCTION 'LIST_FROM_MEMORY'
            TABLES
              listobject = gt_list
            EXCEPTIONS
              not_found  = 1
              OTHERS     = 2.
          IF sy-subrc <> 0.
    * MESSAGE ID SY-MSGID TYPE SY-MSGTY NUMBER SY-MSGNO
    *         WITH SY-MSGV1 SY-MSGV2 SY-MSGV3 SY-MSGV4.
          ENDIF.
          CALL FUNCTION 'DISPLAY_LIST'
    *       EXPORTING
    *         FULLSCREEN                  =
    *         CALLER_HANDLES_EVENTS       =
    *         STARTING_X                  = 10
    *         STARTING_Y                  = 10
    *         ENDING_X                    = 60
    *         ENDING_Y                    = 20
    *       IMPORTING
    *         USER_COMMAND                =
            TABLES
              listobject                  = gt_list
            EXCEPTIONS
              empty_list                  = 1
              OTHERS                      = 2.
          IF sy-subrc <> 0.
    * MESSAGE ID SY-MSGID TYPE SY-MSGTY NUMBER SY-MSGNO
    *         WITH SY-MSGV1 SY-MSGV2 SY-MSGV3 SY-MSGV4.
          ENDIF.
        ENDIF.
      ENDDO.
    <b>* Result: in the 2nd run of the report (via SUBMIT) we get
    *         the same values for the static counter.</b>
    END-OF-SELECTION.
    Regards
      Uwe

  • Concurrent access in Diadem

How does DIAdem manage concurrent access?
    If several network users each of them having a floating licence, want to access the same objects (datasets, reports, view layout), can they?
    What happens if they want to load/delete/modify datasets located in the same folder?
    And if they want to write on the same report?
    Thanks

    Hi condor31,
    The only thing which is concurrent is the license file.  The DIAdem installation is still local on the client computer, which means that client A's DIAdem is completely separate from client B's DIAdem, because the two DIAdems are running on completely different computers.  So you don't have to worry about collisions at the REPORT object level at all.  If DIAdem A and DIAdem B attempt to access or update the same external files (layout files, data files, etc.), then the usual file access rules apply.  Both DIAdem A and DIAdem B can load the same layout or data file from computer C, say, and play with them back on computers A and B, respectively.  If DIAdem A then overwrites the layout or data file on computer C with an updated version, DIAdem B doesn't notice, because DIAdem B has those files in memory on computer B.  If DIAdem B subsequently overwrites the layout or data file on computer C, then DIAdem B wins and its version of the file is now on computer C, though DIAdem A doesn't know this has happened.  If you want to protect a particular data or layout file, you can of course set its Windows read-write permissions to read-only, then neither DIAdem A nor DIAdem B will be able to change it, though they will be able to read it and use it and save the edited version to a new data or layout file.
    The one exception to this is if DIAdem A and DIAdem B happen to register-load the same data file, then they each have an implicit link back to the data file on computer C, and neither will be able to change that data file, since register-loading opens the data file read-only and puts a lock on it so that no one can change it while you have it open, but this is a special case.
    Brad Turpin
    DIAdem Product Support Engineer
    National Instruments

  • How does the Concurrent Access License (CAL) work.

    Description from Google: How does the Concurrent Access License (CAL) work? Xcelsius Engage Server CALs allow for concurrent live data updates inside Xcelsius dashboards. Every time an end-user triggers a Web service inside an Xcelsius dashboard to retrieve live data, a CAL is consumed for a period of 5 minutes. For that period, in a five CAL deployment for example, there will be only four CALs left for consumption. A five CAL deployment could support up to 25 users and additional CALs can be added to support a larger deployment.
    My question is as follows:
    How a five CAL deployment could support up to 25 users and what does it mean. In the first line it is saying that each CAL for a web service is consumed for a period of 5 minutes and how come it can support 25 users concurrently. Did it mean 25 web service connections inside a swf flash file or 25 different users to access a single web service through swf flash.

    The "Set cost controls" concurrent program is used in R12 to mass update the cost control fields on item costs.
    The cost control region is found by going to Cost management >Item costs > Item Costs
    The concurrent program lets you specify which items /costs should be updated by using various parameters such as cost type, item range, category range etc.
    And you can specify the source for the new cost control data and the new value for the fields.
    Hope this answers your question,
    Sandeep Gandhi

  • Maximum number of concurrent devices that could be used with embedde oracle

    Hi,
    How to know what will be the maximum number of concurrent devices that could be used with embedded oracle?
    Any help will be needful for me
    Thanks and Regards

    user598986 wrote:
    How to know what will be the maximum number of concurrent devices that could be used with embedded oracle?Please define 'embedded Oracle'.
    I assume you are talking about an Oracle-supplied database engine. Currently Oracle has several database engines: Oracle Database (Personal/Enterprise/Standard/Express); Oracle Berkeley Database; Oracle Times Ten Database; (iirc) InnoDB Transactional engine; and down the road possibly MySQL.
    Many people mean Berkeley when asking this kind of question, but sice you ask it in an Oracle Database forum as compared to a Berkeley engine forum, I am not quite sure.
    If you indeed mean 'Oracle Database', then it is a lecense question that needs to be discussed by you license people and Oracle sales.

  • Concurrent Access for Crystal Reports for eclipse

    Hi,
    May i know is there any concurrent access limitation when viewing the reports in web using JRC?

    The performance does get better after the initialization of the JRC application.  Because it is a pure java application without any reliances on servers for processing power, it will rely on the speed of the application server.  In the initialization, all of the libraries get loaded in to the classpath so this does take some time.  Generally speaking the performance will get better after this because everything has been loaded in to memory; that is until you restart the application server.
    The JRC will be a bit slower when rendering a large report.  Depending on the size of that report, you may be looking at between a few seconds and several minutes in processing time.
    Whether or not you use the JRC will depend on the number of users you anticipate having at any given time for your application as well as the general size of your reports.
    Crystal Reports Server comes with a set number of licenses.  Initially it comes with 5 and you can purchase up to 20 or 25.  This means you could potentially have about the same number of users as you would with a JRC application, but if you have large reports then you could take advantage of the benefit of being able to schedule those reports (set them to run during an off time so your users can view the instances quickly when they need to).  You do have to be more mindful of how you use licenses with this product, since for each user logged on to the system there will be a license used.  There are many additional benefits, including performance that can be had with CR Server.  One key difference would be in the cost of the product:  The JRC is essentially free, whereas CR Server is not. 
    I would suggest reading our product documentation and applying it to your situation to determine what implementation would work best for you.

  • Shared data - concurrent access

    Entity beans are best used when shared data is being concurrently accessed.
    Could you please clarify this statement . How does it different than shared data - concurrent access by jdbc DAO class ?

    jverd wrote:
    I have no idea what you're asking. I cannot provide clarification until you do.Ok. let me explain further. my question is , is it a good idea that whenever we need shared concurrent data access in a application Entity bean is the best ? Why? does it really outsmart the jdbc DAO access methodology whenever it comes to the realm of shared concurrent data access ?
    For example: I can think of a system e.g online auction house application. This needs concurrent shared data access ..right ?
    Do you think use of Entity bean is best here instead of jdbc DAO access methodology ? Why ?

  • Unexpected error occurred :concurrent access to HashMap attempted

    While runnig the ALBPM 5.7 we got this error. This looks like the ALBPM workflow engine is using HashMap in a unsynchronized way. is this a known issue and is there a work around for this?
    This error happened shortly after a possible blip in the database server, with exception message which said:
    Message:
    The connectivity to the BEA AquaLogic™ BPM Server database has been successful restablished.
    Any thoughts/insight/past experience....
    Looks like we should be using Hashtable instead of a HashMap (or atleast a Synchronized HashMap)
    This is best done at creation time, to prevent accidental unsynchronized access to the map:
    Map m = Collections.synchronizedMap(new HashMap(...));
    See Exception message below
    Message:
    An unexpected error occurred while running an automatic item.
    Details: Connector [ffmaeng_ENGINE_DB_FUEGOLABS_ARG:SQL:Oracle (ALI)] caused an exception when getting a resource of type [0].
    Detail:Connector [ffmaeng_ENGINE_DB_FUEGOLABS_ARG:SQL:Oracle (ALI)] caused an exception when getting a resource of type [0].
    Caused by: concurrent access to HashMap attempted by Thread[ET(49),5,Execution Thread Pool]
    fuego.connector.ConnectorException: Connector [ffmaeng_ENGINE_DB_FUEGOLABS_ARG:SQL:Oracle (ALI)] caused an exception when getting a resource of type [0].
    Detail:Connector [ffmaeng_ENGINE_DB_FUEGOLABS_ARG:SQL:Oracle (ALI)] caused an exception when getting a resource of type [0].
    at fuego.connector.ConnectorException.exceptionOnGetResource(ConnectorException.java:95)
    at fuego.connector.ConnectorTransaction.getResource(ConnectorTransaction.java:285)
    at fuego.connector.JDBCHelper.getConnection(JDBCHelper.java:43)
    at fuego.server.service.EngineConnectorService.getConnection(EngineConnectorService.java:260)
    at fuego.server.service.EngineConnectorService.getEngineConnection(EngineConnectorService.java:160)
    at fuego.transaction.TransactionAction.getEngineHandle(TransactionAction.java:180)
    at fuego.server.execution.EngineExecutionContext.getEngineHandle(EngineExecutionContext.java:352)
    at fuego.server.execution.EngineExecutionContext.persistInstances(EngineExecutionContext.java:1656)
    at fuego.server.execution.EngineExecutionContext.persist(EngineExecutionContext.java:1010)
    at fuego.transaction.TransactionAction.beforeCompletion(TransactionAction.java:133)
    at fuego.connector.ConnectorTransaction.beforeCompletion(ConnectorTransaction.java:654)
    at fuego.connector.ConnectorTransaction.commit(ConnectorTransaction.java:330)
    at fuego.transaction.TransactionAction.commit(TransactionAction.java:303)
    at fuego.transaction.TransactionAction.startBaseTransaction(TransactionAction.java:470)
    at fuego.transaction.TransactionAction.startTransaction(TransactionAction.java:540)
    at fuego.transaction.TransactionAction.start(TransactionAction.java:213)
    at fuego.server.execution.DefaultEngineExecution.executeImmediate(DefaultEngineExecution.java:118)
    at fuego.server.execution.DefaultEngineExecution.executeAutomaticWork(DefaultEngineExecution.java:58)
    at fuego.server.execution.EngineExecution.executeAutomaticWork(EngineExecution.java:42)
    at fuego.server.execution.ToDoItem.executeAutomaticWork(ToDoItem.java:264)
    at fuego.server.execution.ToDoItem.run(ToDoItem.java:531)
    at fuego.component.ExecutionThread.processMessage(ExecutionThread.java:754)
    at fuego.component.ExecutionThread.processBatch(ExecutionThread.java:734)
    at fuego.component.ExecutionThread.doProcessBatch(ExecutionThread.java:140)
    at fuego.component.ExecutionThread.doProcessBatch(ExecutionThread.java:132)
    at fuego.fengine.ToDoQueueThread$PrincipalWrapper.processBatch(ToDoQueueThread.java:432)
    at fuego.component.ExecutionThread.work(ExecutionThread.java:818)
    at fuego.component.ExecutionThread.run(ExecutionThread.java:397)
    Caused by: java.util.ConcurrentModificationException: concurrent access to HashMap attempted by Thread[ET(49),5,Execution Thread Pool]
    at java.util.HashMap.onExit(HashMap.java:226)
    at java.util.HashMap.transfer(HashMap.java:690)
    at java.util.HashMap.resize(HashMap.java:676)
    at java.util.HashMap.addEntry(HashMap.java:1049)
    at java.util.HashMap.put(HashMap.java:561)
    at fuego.lang.cache.CacheStatistic.lock(CacheStatistic.java:246)
    at fuego.lang.cache.TimedMultiValuatedCache.getLocked(TimedMultiValuatedCache.java:282)
    at fuego.lang.cache.TimedPool.get(TimedPool.java:80)
    at fuego.connector.impl.BaseJDBCPooledConnector.getConnection(BaseJDBCPooledConnector.java:140)
    at fuego.connector.impl.BaseJDBCConnector.getResource(BaseJDBCConnector.java:222)
    at fuego.connector.ConnectorTransaction.getResource(ConnectorTransaction.java:280)
    ... 26 more

    Hi BalusC,
    I forgot to tell one thing, the exception what I mentioned is coming very rarely. The application is in Production and they getting this Exception once in 3 months. Is there any way to re-produce the same exception number of times to check whether it has been fixed or not after installing the updates as you said. If you have any information regarding this exception please send me.
    Thank You.

  • Concurrent access to a SortedMap

    i need a Map data structure which would allow concurrent access (like ConcurrentHashMap) while being able to return elements in a defined order (like SortedMap) for my application. i'm using Java SE 5.
    how do i create this sort of data structure?
    can SortedMaps be somehow wrapped with ConcurrentHashMap similar to:
    Collections.synchronizedMap(new TreeMap());
    ?

    can SortedMaps be somehow wrapped with
    ConcurrentHashMap similar to:
    Collections.synchronizedMap(new TreeMap());
    Sure, you could do that. Although the synchronized map view returned by Collections.synchronizedMap offers only the Map interface (not SortedMap), you'll still have the guarantee that the key/value pairs are sorted by their keys, thus you'll be able to iterate in a defined order, as you requested. Anyway, there is also Collections.synchronizedSortedMap!
    OTOH, the keyword "iteration" already points to a caveat:
    iterating a map is usually done by iterating either its entrySet or its keySet (with subsequent value-retrieval). Even when using a synchronized map view, access to the entry set or key set will need to be synchronized manually as described in the JavaDocs.
    Generally speaking, the Collections.synchronized... views are rarely useful at all, since code blocks that need to be executed atomically almost always include more than one access to the collection.

  • Flex and concurrent access

    I am going to work on a new project. This project is a real time scanning  processing monitor. The application would launch from an html wrapper. Also a few more processes will also start from there using JavaScript code:
    oReadfromScanner1 = new ActiveXObject("comportreader.classname");
    oReadfromScanner2 = new ActiveXObject("comportreader.classname");
    I am going to have up to 10 scanner readers which will update Flex client screen.
    There will be many times when readers try to update the client in exact same time. What is a design pattern to manage simultaneous access to Flex? Or there will be no problem at all?
    Thanks

    Thanks for the feedback. This is still bothering me,
    yes I could have a static RandomAccessFile and
    synchronise on this, but I really want concurrent
    access.
    I've implemented a locking mechanism to prevent
    different RandomAccessFile instances updating the
    same record - is this not a waste if only one
    RandomAccessFile can write to the file anyway?
    Or is there another Java class I can use to access
    the file in this way?
    Thanks for the help.Hi,
    if the intention of using multiple instanced of RandamAccessFile is concurrent access, then i feel your locking mechanism doesnt achieve the purpose..
    also, at any case, you may not plan for full concurrency in updating a file....
    it is more prone to malfunctions..
    probably, to enhance performance, you can lock only the part your code that actually writes to the file, like io.write() , in this way you can perform all business logic with respect to writing and serialize only the actual file writing...
    even in this case, you must be sure that writing to different part of the file, doesnt really impact other parts of the file which might be manipulated by other threads..
    i have one more thought on this,
    if updating different parts of the file doesnt affect content of other parts of the file,
    then can you think of having different files itself?
    if using different files is not a good idea, then
    probably think of using some buffering mechanism, like collect all data concurrently and periodically update the actual file from the buffer.. just a raw idea but all depends on your system needs & requirements.. ..

  • Configuration for best performance with concurrent access

    Could someone tell me the flags I need to specify to achieve the best BDB performance in a multithreaded (concurrent) environment?
    I am using Hash type and have simple access protocol. Just key/value get/put and delete. I don't need transactions or recovery
    I tried DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB | DB_THREAD env. open flags and DB_CREATE | DB_THREAD db open flags, but I can only achieve 10000 requests per second.
    I can get up to 40000 RPS with a single thread and just DB_CREATE | DB_INIT_MPOOL for environment and DB_CREATE for database. That should be sufficient, but I need that performance in multithreaded environment.
    I tried EnvOpenFlags = DB_CREATE | DB_INIT_MPOOL | DB_THREAD and DbOpenFlags = DB_CREATE | DB_THREAD, but I am getting the following errors on Put:
    page 29778: illegal page type or format
    PANIC: fatal region error detected; run recovery
    PANIC: Invalid argument
    If I add DB_INIT_LOCK to the EnvOpenFlags, I get deadlocks. I am not sure why I even getting them based on my access schema.
    Thanks,
    Igor

    I am running test on 64 bit Windows 2003.
    Data is accessed randomly, but in the test, I do only inserts. Data is just a list of key/value pairs. Key is int and Value is byte array or size 50 (but it could vary up to about 50000).
    In production it could be around 30 database files (one database per file), but for the test I use only one file.
    I don't have a config.log file. I built only the db_dll and db_stat projects in VS2005 and I use it from C# code through a managed C++ wrapper.
    Here are the stats for the DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB | DB_THREAD set of flags:
    db_stat -e =>
    Tue Feb 06 09:44:37 2007 Local time
    0x120897 Magic number
    0 Panic value
    4.5.20 Environment version
    Mon Feb 05 17:57:21 2007 Creation time
    0x440a8ca8 Environment ID
    1 Primary region allocation and reference count mutex [0/0 0% !Own]
    2 References
    Thread status blocks:
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    33 Last allocated locker ID
    0x7fffffff Current maximum unused locker ID
    5 Number of lock modes
    1000 Maximum number of locks possible
    1000 Maximum number of lockers possible
    1000 Maximum number of lock objects possible
    3 Number of current locks
    5 Maximum number of locks at any one time
    4 Number of current lockers
    6 Maximum number of lockers at any one time
    2 Number of current lock objects
    4 Maximum number of lock objects at any one time
    6813619 Total number of locks requested
    6813616 Total number of locks released
    0 Total number of locks upgraded
    12 Total number of locks downgraded
    3262547 Lock requests not available due to conflicts, for which we waited
    0 Lock requests not available due to conflicts, for which we did not wait
    0 Number of deadlocks
    0 Lock timeout value
    0 Number of locks that have timed out
    0 Transaction timeout value
    0 Number of transactions that have timed out
    536KB The size of the lock region
    8090 The number of region locks that required waiting (0%)
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    5GB 50MB Total cache size
    1 Number of caches
    5GB 50MB Pool individual cache size
    0 Maximum memory-mapped file size
    0 Maximum open file descriptors
    0 Maximum sequential buffer writes
    0 Sleep after writing maximum sequential buffers
    0 Requested pages mapped into the process' address space
    17M Requested pages found in the cache (99%)
    25834 Requested pages not found in the cache
    22234 Pages created in the cache
    11856 Pages read into the cache
    88248 Pages written from the cache to the backing file
    0 Clean pages forced from the cache
    0 Dirty pages forced from the cache
    0 Dirty pages written by trickle-sync thread
    34063 Current total page count
    5424 Current clean page count
    28639 Current dirty page count
    786431 Number of hash buckets used for page location
    17M Total number of times hash chains searched for a page (17031608)
    1 The longest hash chain searched for a page
    16M Total number of hash chain entries checked for page (16971686)
    0 The number of hash bucket locks that required waiting (0%)
    0 The maximum number of times any hash bucket lock was waited for (0%)
    0 The number of region locks that required waiting (0%)
    0 The number of buffers frozen
    0 The number of buffers thawed
    0 The number of frozen buffers freed
    34100 The number of page allocations
    0 The number of hash buckets examined during allocations
    0 The maximum number of hash buckets examined for an allocation
    0 The number of pages examined during allocations
    0 The max number of pages examined for an allocation
    0 Threads waited on page I/O
    Pool File: data.bdb30
    8192 Page size
    0 Requested pages mapped into the process' address space
    17M Requested pages found in the cache (99%)
    25832 Requested pages not found in the cache
    22234 Pages created in the cache
    11854 Pages read into the cache
    88240 Pages written from the cache to the backing file
    Pool File: e:/BerkeleyDb\admin.bdb
    8192 Page size
    0 Requested pages mapped into the process' address space
    19 Requested pages found in the cache (90%)
    2 Requested pages not found in the cache
    0 Pages created in the cache
    2 Pages read into the cache
    8 Pages written from the cache to the backing file
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    96MB 88KB Mutex region size
    0 The number of region locks that required waiting (0%)
    4 Mutex alignment
    100 Mutex test-and-set spins
    1574134 Mutex total count
    1239 Mutex free count
    1572895 Mutex in-use count
    1572897 Mutex maximum in-use count
    Mutex counts
    1239 Unallocated
    1 db handle
    1 env dblist
    1 env region
    1 lock region
    5 logical lock
    2 mpoolfile handle
    1 mpool filehandle
    17 mpool file bucket
    1 mpool handle
    786431 mpool hash bucket
    786431 mpool buffer I/O
    1 mpool region
    1 unknown mutex type
    1 twister
    db_stat -d =>
    Tue Feb 06 09:46:42 2007 Local time
    61561 Hash magic number
    8 Hash version number
    Little-endian Byte order
    Flags
    8192 Underlying database page size
    0 Specified fill factor
    6048411 Number of keys in the database
    6048411 Number of data items in the database
    27229 Number of hash buckets
    14M Number of bytes free on bucket pages (93% ff)
    0 Number of overflow pages
    0 Number of bytes free in overflow pages (0% ff)
    8438 Number of bucket overflow pages
    30M Number of bytes free in bucket overflow pages (57% ff)
    0 Number of duplicate pages
    0 Number of bytes free in duplicate pages (0% ff)
    1 Number of pages on the free list
    db_stat -m =>
    5GB 50MB Total cache size
    1 Number of caches
    5GB 50MB Pool individual cache size
    0 Maximum memory-mapped file size
    0 Maximum open file descriptors
    0 Maximum sequential buffer writes
    0 Sleep after writing maximum sequential buffers
    0 Requested pages mapped into the process' address space
    17M Requested pages found in the cache (99%)
    27219 Requested pages not found in the cache
    22444 Pages created in the cache
    13241 Pages read into the cache
    120831 Pages written from the cache to the backing file
    0 Clean pages forced from the cache
    0 Dirty pages forced from the cache
    0 Dirty pages written by trickle-sync thread
    35672 Current total page count
    35658 Current clean page count
    14 Current dirty page count
    786431 Number of hash buckets used for page location
    17M Total number of times hash chains searched for a page (17471571)
    1 The longest hash chain searched for a page
    17M Total number of hash chain entries checked for page (17408653)
    0 The number of hash bucket locks that required waiting (0%)
    0 The maximum number of times any hash bucket lock was waited for (0%)
    0 The number of region locks that required waiting (0%)
    0 The number of buffers frozen
    0 The number of buffers thawed
    0 The number of frozen buffers freed
    35695 The number of page allocations
    0 The number of hash buckets examined during allocations
    0 The maximum number of hash buckets examined for an allocation
    0 The number of pages examined during allocations
    0 The max number of pages examined for an allocation
    0 Threads waited on page I/O
    Pool File: data.bdb30
    8192 Page size
    0 Requested pages mapped into the process' address space
    17M Requested pages found in the cache (99%)
    27217 Requested pages not found in the cache
    22444 Pages created in the cache
    13239 Pages read into the cache
    120821 Pages written from the cache to the backing file
    Pool File: e:/BerkeleyDb\admin.bdb
    8192 Page size
    0 Requested pages mapped into the process' address space
    25 Requested pages found in the cache (92%)
    2 Requested pages not found in the cache
    0 Pages created in the cache
    2 Pages read into the cache
    10 Pages written from the cache to the backing file
    db_stat -CA =>
    Default locking region information:
    39 Last allocated locker ID
    0x7fffffff Current maximum unused locker ID
    5 Number of lock modes
    1000 Maximum number of locks possible
    1000 Maximum number of lockers possible
    1000 Maximum number of lock objects possible
    0 Number of current locks
    5 Maximum number of locks at any one time
    0 Number of current lockers
    6 Maximum number of lockers at any one time
    0 Number of current lock objects
    4 Maximum number of lock objects at any one time
    6975306 Total number of locks requested
    6975306 Total number of locks released
    0 Total number of locks upgraded
    15 Total number of locks downgraded
    3423839 Lock requests not available due to conflicts, for which we waited
    0 Lock requests not available due to conflicts, for which we did not wait
    0 Number of deadlocks
    0 Lock timeout value
    0 Number of locks that have timed out
    0 Transaction timeout value
    0 Number of transactions that have timed out
    536KB The size of the lock region
    8274 The number of region locks that required waiting (0%)
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Lock REGINFO information:
    Lock Region type
    4 Region ID
    e:\BerkeleyDB\__db.004 Region name
    0x90a0000 Original region address
    0x90a0000 Region address
    0x9125f00 Region primary address
    0 Region maximum allocation
    0 Region allocated
    REGION_JOIN_OK Region flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Lock region parameters:
    1572884 Lock region region mutex [8274/17M 0% 1552/2884]
    1031 locker table size
    1031 object table size
    532064 obj_off
    515560 locker_off
    1 need_dd
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Lock conflict matrix:
    0 0 0 0 0
    0 0 1 0 0
    0 1 1 1 1
    0 0 0 0 0
    0 0 1 0 1
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Locks grouped by lockers:
    Locker Mode Count Status ----------------- Object ---------------
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Locks grouped by object:
    Locker Mode Count Status ----------------- Object ---------------

  • Can i access my account and use the programs on another computer?

    can i access my account and use the programs on another computer? i would like to be able to use both my laptop and my desktop

    yes, you are allowed concurrent installations and activations on, up to, two computers.
    by signing out of one, you can activate on a third etc.

  • Synchronized method not preventing concurrent access

    Hi
    I have 3 classes, T (a Runnable), TRunner (instantiates and starts a thread using T), and Sync (with one synchronized method, foo).
    The problem is that foo is entered concurrently by different threads at the same time. How so?
    T.java:
    import java.util.Calendar;
    class T implements Runnable
       private String name;
       public T(String name)
         this.name = name;
       public void run()
          Thread.currentThread().setName(name);
          Sync s = new Sync();
          System.out.println(Calendar.getInstance().getTime() + ".....Running " + Thread.currentThread().getName());
          s.foo(name);
    }TRunner.java:
    class TRunner
       public static void main(String args[])
           T tx = new T("x");
           T ty = new T("y");
           T tz = new T("z");
           Thread t1 = new Thread(tx);
           Thread t2 = new Thread(ty);
           Thread t3 = new Thread(tz);
           t1.start();
           t2.start();
           t3.start();
    }Sync.java:
    import java.util.Calendar;
    class Sync
       public synchronized void foo(String threadname)
              System.out.println(Calendar.getInstance().getTime() + ":" + threadname + "....entering FOO");
              try
                   Thread.sleep(5000);
              catch (InterruptedException e)
                   System.out.println("interrupted");
              System.out.println(Calendar.getInstance().getTime() + ":" + threadname + "....leaving FOO");
    }Console output:
    C:\javatemp>java TRunner
    Mon Apr 09 15:35:46 CEST 2007.....Running x
    Mon Apr 09 15:35:46 CEST 2007:x....entering FOO
    Mon Apr 09 15:35:46 CEST 2007.....Running y
    Mon Apr 09 15:35:46 CEST 2007:y....entering FOO
    Mon Apr 09 15:35:46 CEST 2007.....Running z
    Mon Apr 09 15:35:46 CEST 2007:z....entering FOO
    Mon Apr 09 15:35:51 CEST 2007:x....leaving FOO
    Mon Apr 09 15:35:51 CEST 2007:y....leaving FOO
    Mon Apr 09 15:35:51 CEST 2007:z....leaving FOO
    C:\javatemp>Thanks in advance.

    > Only for static methods. For instance methods, the lock is the object.
    You are absolutely right.
    > The Class object is no different from any other object. "Entire" Class object makes no sense.
    What I wanted to say is that it's better to synchronize on the object we want to protect from concurrent access rather than on the entire class or instance object.
    > "Efficiency" is not altered by locking on a Class object vs. any other object.
    I studied that it's better to synchronize on the objects we want to protect instead of the entire instance or class object. If one declares a method as synchronized, it means that other threads won't be able to access other synchronized methods for the same object, even if the two weren't in conflict. That was explained as a performance penalty.
    >
    Or when one or more threads may modify it and one or
    more may read it.
    Yep, sure.
    >
    No, they're not.
    You are absolutely right. What I wanted to say is that local variables are unique per thread.
    >
    Local variables are unique per thread, but that's NOT
    atomicity.Sorry for any confusion
    Message was edited by:
    mtedone

Maybe you are looking for

  • Lost sound after an update. need help please

    had some kind of system update. sound worked fine yesterday. did update today, and now nothing. heres what i have HP Pavilion HPE h8xt • Genuine Windows 7 Home Premium [64-bit] • Intel(R) Core(TM) i7-2600 quad-core processor with Turbo-Boost [up to 3

  • Restrict the number of rows in XML publisher

    Hi All, I have report which has header and lines. I want to display all the lines corresponding to an header in one page in xml publisher. Please suggest. Thanks and Regards, Mahesh

  • Licence service has stopped working

    Getting the above error when launching Dreamweaver CS4. Have tried all the "solutions" listed on Adobe site to no avail. The last "solution" cleared the error pop up , I get to accept the licence agreement then Dreamweaver crashes. Any advice appreci

  • How do I stop PSE from opening PDF files?

    I want to use Adobe Reader to open PDF files.  Whenever I click a PDF file to open, the computer opens it with PSE.  I am using Windows XP and Adobe Photoshop Elements 5.0.  How do I switch the computer to use Adobe Reader instead of Adobe Photoshop

  • 9ias reports format problem PDF - HTML - HTMLCSS

    I have a report to be printed in PDF Or HTML or HTMLCSS format. I am using Oralce 9ias and Reports 9i. PDF output is OK. But the same output in HTML and HTMLCSS are not coming properly. For e.g. I have a field with size as 150. I am setting Vertical