Why does BDB deadlock??

BDB is deadlocking. The environment is initialized as shown below, but
set_lk_detect does not seem to take effect. Why?
/* Open and configure an environment. */
int
env_init(DB_ENV *dbenv, const char *home)
{
	u_int32_t flags;
	int ret;

	ret = dbenv->set_cachesize(dbenv, 0, M_DB_ENV_CACHESIZE, 0);
	ret |= dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1);
	ret |= dbenv->set_flags(dbenv, DB_AUTO_COMMIT, 1);
	ret |= dbenv->set_flags(dbenv, DB_LOG_INMEMORY, 1);
	ret |= dbenv->set_lg_bsize(dbenv, 512 * 1024);
	/* 1-second lock timeout (the value is in microseconds). */
	ret |= dbenv->set_timeout(dbenv, 1 * 1000 * 1000, DB_SET_LOCK_TIMEOUT);
	ret |= dbenv->set_lk_detect(dbenv, DB_LOCK_MINLOCKS);

	flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
	    DB_INIT_REP | DB_INIT_TXN | DB_RECOVER | DB_THREAD | DB_PRIVATE;
	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0)
		dbenv->err(dbenv, ret, "can't open environment");
	return (ret);
}
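(For reference, the environment handle would be created and env_init called roughly
like the minimal sketch below; this is illustrative only, not from the original post,
and the caller name and home-path handling are assumptions:)

#include <db.h>

int
open_env(const char *home, DB_ENV **dbenvp)
{
	DB_ENV *dbenv;
	int ret;

	/* Create the environment handle, then configure and open it. */
	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = env_init(dbenv, home)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}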
The output of db_stat "-Co" (env_lock_stat) shows:
8000360b READ 1 HELD /ram/uplinkhis15.db.db page 1
8000360d WRITE 1 WAIT /ram/uplinkhis15.db.db page 1
8000360e READ 1 WAIT /ram/uplinkhis15.db.db page 1
8000360d WRITE 3 HELD /ram/uplinkhis15.db.db page 2
8000360b READ 1 PENDING /ram/uplinkhis15.db.db page 46
vxWorks-Shell#tt perf
4854f0 vxTaskEntry +68 : ipcModuleRecvTaskEntry (3)
683b04 ipcModuleRecvTaskEntry+190: perfCoreIpcCallback (909a4c0)
25d550 perfCoreIpcCallback+dc : perf15mHandAll ()
25d654 perf15mHandAll +18 : perfUplink15mHandle ()
25d850 perfUplink15mHandle+190: DBApiCondCount (2007, 8, b835a80, b83572c)
2c0650 DBApiCondCount +1c4: __db_c_get_pp (be7f8e8, b834c20, b834c40, 1b)
3dd950 __db_c_get_pp +12c: __db_c_get (be7f8e8, b834c20, b834c40, 1b)
3cdc3c __db_c_get +6e0: 425828 ()
425828 __bam_c_dup +ab0: 429e68 (90a5280, 0, b834c20, 1b, b834ad0)          
42a9b8 __bam_c_rget +19b4: __bam_search (90a5280)
3a2e84 __bam_search +120: __bam_get_root (90a5280, 1, 1, 101, b8349c4)
3a28c0 __bam_get_root +108: __db_lget (90a5280, 0, 1, 1, 0, b834900)
3e3714 __db_lget +3cc: __lock_get (9971970, 8000360e, 0, 90a52f8, 1, b834900)
3102d0 __lock_get +e0 : __lock_get_internal (97d8930, 8000360e, 0, 90a52f8, 1, f4240, b834900)
3117e4 __lock_get_internal+149c: __db_tas_mutex_lock (9971970, 349)
348808 __db_tas_mutex_lock+2fc: __os_sleep (9971970, 0, 2710)
34be78 __os_sleep +ec : select ()                                          
value = 0 = 0x0
vxWorks-Shell#tt CLI_T
4854f0 vxTaskEntry +68 : cli_telnet_thread ()
665abc cli_telnet_thread+7c : thread_call (90e9ef8)
66bfd8 thread_call +4c : 671568 ()
671568 vty_read +450: 670f24 (be878c8)
670f9c vty_clear_buf +4e0: vty_command (be878c8, be847b0)
66ebec vty_command +54 : cmd_execute_command (be773e0, be878c8, 0, 0)
6614a0 cmd_execute_command+130: 660ec0 (be773e0, be878c8, 0)
66134c cmd_complete_command+570: 6bbb04 (&no_perf_uplink_if_cmd, be878c8, 1, 90e9ab8)
6bbb6c perfCheckSampleTime+24e8: perfDisableUplink (1010000)
25e5ac perfDisableUplink+c0 : perfDeleteUplinkIf (1, 1)
260e90 perfDeleteUplinkIf+21c: DBApiDelSingle (2007, 90e9758)
2bef74 DBApiDelSingle +178: __db_del_pp (970ddb0, 90b8690, 90e89c8, 0)
3d9ad4 __db_del_pp +284: __db_del (970ddb0, 90b8690, 90e89c8, 0)
3c2048 __db_del +308: __db_c_close (90911b8)
3cc73c __db_c_close +238: 423b84 ()
423b84 __bam_c_refresh+7a8: 42ad00 (90911b8)
42b0c4 __bam_c_rget +20c0: __bam_search (90911b8)
3a2e84 __bam_search +120: __bam_get_root (90911b8, 1, 0, 40000, 90e877c)
3a28c0 __bam_get_root +108: __db_lget (90911b8, 0, 1, 2, 0, 90e86b8)
3e3714 __db_lget +3cc: __lock_get (9971970, 8000360d, 0, 9091230, 2, 90e86b8)
3102d0 __lock_get +e0 : __lock_get_internal (97d8930, 8000360d, 0, 9091230, 2, f4240, 90e86b8)
3117e4 __lock_get_internal+149c: __db_tas_mutex_lock (9971970, 347)
348808 __db_tas_mutex_lock+2fc: __os_sleep (9971970, 0, 2710)
34be78 __os_sleep +ec : select ()                                          
b41d0 select +168: semTake ()
f0e88 semTake +13c: semBTake ()
value = 0 = 0x0
vxWorks-Shell#tt tSnmpd
4854f0 vxTaskEntry +68 : e1adc (eeeeeeee)
e1b10 snmpdInit +1a4: snmpIoMain ()
e31a8 snmpIoMain +b8 : e31c4 ()                                           
e3324 snmpIoMain +234: snmpdPktProcess ()
e1be0 snmpdPktProcess+28 : Process_Rcvd_SNMP_Packet_Async ()
48b4a4 Process_Rcvd_SNMP_Packet_Async+e0 : 48b0a4 ()
48b3a8 process_packet_two+3b4: process_packet_two ()
48b060 process_packet_two+6c : SNMP_Process_Next_PDU ()
490bf8 SNMP_Process_Next_PDU+324: eponPerfHis15minUplinkEntry_next (3, 3, 90b838c, 9091450, 9091340)
45e210 eponPerfHis15minUplinkEntry_next+50 : eponPerfHis15minUplinkEntry_int_next (3, 90b838c, be91860)
47bb38 eponPerfHis15minUplinkEntry_int_next+238: perfGetUplinkHistory15StatNext (be91860)
6b6a80 perfGetUplinkHistory15StatNext+28 : DBApiQueryNext (2007, be91860, be91860)
2bfb9c DBApiQueryNext +1ec: __db_c_get_pp (970d7b0, be90cf0, be90d10, 19)
3dd950 __db_c_get_pp +12c: __db_c_get (970d7b0, be90cf0, be90d10, 19)
3cdc3c __db_c_get +6e0: 4257e8 ()
4257e8 __bam_c_dup +a70: 429e68 (90bd268, 0, be90cf0, 19, be90ba0)          
42a9b8 __bam_c_rget +19b4: __bam_search (90bd268)
3a2e84 __bam_search +120: __bam_get_root (90bd268, 1, 1, 581, be90a94)
3a28c0 __bam_get_root +108: __db_lget (90bd268, 0, 1, 1, 0, be909d0)
3e3714 __db_lget +3cc: __lock_get (9971970, 8000360f, 0, 90bd2e0, 1, be909d0)
3102d0 __lock_get +e0 : __lock_get_internal (97d8930, 8000360f, 0, 90bd2e0, 1, f4240, be909d0)
3117e4 __lock_get_internal+149c: __db_tas_mutex_lock (9971970, 344)
value = 0 = 0x0
vxWorks-Shell#env_lock_stat ---->lock_stat_print
182 Last allocated locker ID
0x7fffffff Current maximum unused locker ID
9 Number of lock modes
1000 Maximum number of locks possible
1000 Maximum number of lockers possible
1000 Maximum number of lock objects possible
76 Number of current locks
82 Maximum number of locks at any one time
185 Number of current lockers
185 Maximum number of lockers at any one time
74 Number of current lock objects
81 Maximum number of lock objects at any one time
37040 Total number of locks requested
36961 Total number of locks released
0 Total number of locks upgraded
72 Total number of locks downgraded
72 Lock requests not available due to conflicts, for which we waited
1 Lock requests not available due to conflicts, for which we did not wait
0 Number of deadlocks
1000000 Lock timeout value
0 Number of locks that have timed out
0 Transaction timeout value
0 Number of transactions that have timed out
344KB The size of the lock region
29 The number of region locks that required waiting (0%)
value = 0 = 0x0
vxWorks-Shell#

Hello,
There are three basic ways to perform deadlock detection.
http://www.oracle.com/technology/documentation/berkeley-db/db/ref/lock/dead.html
http://www.oracle.com/technology/documentation/berkeley-db/db/api_c/env_set_lk_detect.html
http://www.oracle.com/technology/documentation/berkeley-db/db/api_c/lock_detect.html
http://www.oracle.com/technology/documentation/berkeley-db/db/api_c/env_set_timeout.html
1. use the DbEnv::set_lk_detect method to automatically run the
deadlock detector whenever there is a conflict over a lock. With
this method you specify which lock request(s) should be rejected.
With this choice the lock table is walked as soon as a lock
request blocks, hence an application should not have to wait
on a lock before discovering that a deadlock occurred.
2. use the DbEnv::lock_detect method which runs a single iteration
of the BDB deadlock detector. In this case use a thread or external
process to perform deadlock detection. Your application will run
deadlock detection at some interval. The application may have to
wait to be notified of a deadlock, but you will not incur the
overhead of walking the lock table every time a lock request is blocked.
3. use DbEnv::set_timeout to configure the locking subsystem to time out
any lock not released within a specified amount of time. Note that lock
timeouts are only checked when a lock request is blocked or when deadlock
detection is otherwise performed. Therefore, a lock can have timed out
and still be held for some length of time until BDB has a reason to
examine its locking tables.
So for your question on DbEnv::lock_detect, yes this needs to be
run in a thread or external process after some specific time interval,
like a few seconds to a minute.
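For example, a background deadlock-detector along the lines of option 2 could look
like the following minimal sketch in the C API (the thread entry name, the 5-second
interval, and the use of POSIX sleep() are assumptions for illustration; on VxWorks
you would spawn it with taskSpawn and delay with taskDelay instead):

#include <unistd.h>   /* sleep() */
#include <db.h>

/* Hypothetical thread entry: run one deadlock-detection pass every 5 seconds. */
void *
deadlock_detector_thread(void *arg)
{
	DB_ENV *dbenv = (DB_ENV *)arg;
	int rejected, ret;

	for (;;) {
		/* DB_LOCK_DEFAULT reuses the policy given to set_lk_detect, if any. */
		ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_DEFAULT, &rejected);
		if (ret != 0)
			dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
		else if (rejected > 0)
			dbenv->errx(dbenv, "detector rejected %d lock request(s)", rejected);
		sleep(5);
	}
	return (NULL);
}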
For your initial question, please clarify in a bit more detail what
the problem is. The output from db_stat -Co does not appear to show a
deadlock.
Thanks,
Sandra

Similar Messages

  • Bdb deadlocks when opening database

    When I call _dbp->open(), the library hangs.  I am attempting to test how BDB works during system crashes, in order to code my application appropriately, but these hangs are difficult to work with.
    I am running multiple processes all reading and writing to a single database. I then kill off the processes one at a time, until one of the terminations hangs all the other processes reading or writing to that database. I understand the database may at that time be in a bad state and may need to be re-opened with the DB_RECOVER flag, but I don't get any kind of an error message on the other processes reading or writing to the database. They just hang. (it looks like they are waiting for the lock of the terminated process to be closed).
    I have code to check for a DB_RUNRECOVERY error number returned as part of the exception thrown during a db->open() operation, so I can then call db->open() again with the DB_RECOVER option, but db->open() never returns.
    I have tried setting the transaction timeout values and lock timeout values, but nothing seems to be working.
    I have tried working with the DB_REGISTER option, but would prefer not to limit each process to a single environment. I'd like to hide the BDB environment entirely from the caller and simply require the database name.
    Any assistance would be appreciated,
    Todd

    I do not have the option of launching a separate process to monitor the database. Unfortunately, I have resigned myself to the fact that I must provide a separate thread to test for failures and deadlocks.
    As a result, I have created a static process-wide "EnvironmentManager" class to maintain a collection of Environments and Databases opened by the current process. This ensures the process will only have a single instance of any Environment open. As a result, I should be able to open my environment(s) with DB_REGISTER and DB_RECOVER.
    I also created a monitor thread. This thread does the following (see the sketch after the recovery steps below):
    foreach open environment
         envp->lock_detect(0, DB_LOCK_DEFAULT, NULL);
         envp->failchk(0);
    If a DbException with errno set to DB_RUNRECOVERY occurs, I do the following:
         Close each database opened within the faulted environment
         Close the environment
         Re-Open the environment
         Re-Open each database
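    For reference, a single pass of such a monitor could look like the minimal
    sketch below, written against the C API rather than the C++ API used above
    (the helper name is hypothetical; the loop over open environments, the
    scheduling interval, and error handling are omitted, and DB_ENV->failchk
    additionally requires that DB_ENV->set_thread_id and DB_ENV->set_isalive
    were configured when the environment was opened):

    #include <db.h>

    /* Hypothetical helper: one deadlock-detection and failure-check pass. */
    int
    monitor_pass(DB_ENV *dbenv)
    {
        int rejected, ret;

        /* Abort one lock request from any deadlock cycle that is found. */
        if ((ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_DEFAULT, &rejected)) != 0)
            return (ret);

        /* Returns DB_RUNRECOVERY if a crashed thread or process left the
         * environment in a state that requires recovery. */
        return (dbenv->failchk(dbenv, 0));
    }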
    I would like to cycle through any open transactions, locks and cursors to close them, but it appears I do not have access to them.
    My test engine runs in an infinite loop doing the following:
    - Open Environment and Database
    - Write 200 records
    - Read 200 records
    - Get a count of records
    - Close the database and environment
    To test the failures, I launch multiple instances of the test engine then kill the processes one at a time. Obviously my production application will do a normal shut-down under normal conditions, but I need to handle the case when an application crashes unexpectedly. I understand problems will occur; my goal is simply to get the system to throw exceptions that I can recover from. I need to resolve all dead-lock issues that can hang my application and all unexpected application-aborts that occur as a result of DB_ASSERT failures.
    The problems I am now encountering after implementing all of this:
    1) After a process has crashed and I attempt to ‘recover’ from within another running process, I am getting an exception regarding an open file handle to __db.register.
    PANIC: fatal region error detected; run recovery
    File handles still open at environment close
    Open file handle: C:\__db.register
    assert failure: ..\mutex\mut_win32.c/174: "__ip != NULL && __ip->dbth_state != THREAD_OUT"
    2) If I try to run the same application multiple times quickly by double clicking on the same executable, I get the following exception:
    DB_REGISTER limits processes to one open DB_ENV handle per environment
    DbEnv::open: Invalid argument
    Both of these issues appear to be with the DB_REGISTER option being specified when I open the environment, but things get much murkier if I remove that option.
    If I do not specify DB_REGISTER and DB_RECOVER on the command line, it is possible for my call to DbEnv->Open() to hang indefinitely. Otherwise, I would call Open() once and if it failed, call it again with DB_RECOVER.
    Please let me know how to proceed. How have other people handled running Berkeley DB across multiple processes?
    Regards,
    Todd

  • Why is bdb-ha not working normally?

    In our company project, we use BDB-HA to replicate master BDB data to a slave BDB. But we find that data replication is not working well, and we lose some important data, which causes serious problems for us.
    I want to know what could cause the BDB data synchronization to fail. Where can we find some introduction to data synchronization?
    Thanks.

    Hello,
    The documentation on synchronizing with a master is at:
    http://www.oracle.com/technology/documentation/berkeley-db/db/ref/rep/mastersync.html
    Additionally, what version and platform are you running on, and
    what details are available on exactly what is going on?
    Please turn verbose messaging on at both the master and slave
    and reproduce the problem with the smallest test possible so that
    the log files are a manageable size. The documentation
    on how to get complete replication logging is at:
    http://www.oracle.com/technology/documentation/berkeley-db/db/api_c/env_set_verbose.html#DB_VERB_REPLICATION
    Depending on the version used, you may also need to
    build the Berkeley DB library with --enable-diagnostic
    as well as call the DB_ENV->set_verbose method.
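    (For reference, enabling verbose replication messages through the C API looks
    roughly like the minimal sketch below; the handle name is illustrative and the
    snippet is not part of the original reply:)

    /* Enable verbose replication diagnostics on an open environment handle. */
    int ret;
    if ((ret = dbenv->set_verbose(dbenv, DB_VERB_REPLICATION, 1)) != 0)
        dbenv->err(dbenv, ret, "DB_ENV->set_verbose(DB_VERB_REPLICATION)");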
    Thanks,
    Sandra

  • Deadlocks with ALTER TABLE DISABLE CONSTRAINT

    Hello,
    We're deleting millions of redundant rows from a particular table in our live 10g database. This is being done online because the downtime would be unacceptable. The table in question has 30 child tables, so for speed I am disabling the foreign keys using ALTER TABLE DISABLE CONSTRAINT before the deletion (we haven't had any constraint violations for ages). Without this, deletion takes about 1 second per row i.e. a very long time.
    However, we're finding that ALTER TABLE DISABLE CONSTRAINT often reports ORA-00060: deadlock detected. This is causing problems with the live system. Can anyone think of the reason why a deadlock might occur in this situation and what we could do to prevent it happening? Note that any solution has to be doable without downtime unless it takes less than 30 minutes.
    Thanks a lot
    Ed
    Edited by: edwiles on Feb 4, 2009 6:02 AM

    Look at the suggestions in the similar thread:
    Re: Deadlock when deleting a not linked data record in a parent table

  • Service Broker Activation Deadlocking

    I have implemented a (slightly) modified version of conversation recycling using conversation timers and stored procedure activation from http://rusanu.com/2007/05/03/recycling-conversations/ . However it appears that, occasionally, deadlocks  occur
    between the send and activated procedures on the conversation group/table. The main modification is that instead of having a column to represent the SPID in the table I am using an IdentifierType and Identifier value to identify the conversation. However I
    am only using the defaults (@@SPID) so I don't think that should matter in this case.
    For the send side I have:
    CREATE PROCEDURE [dbo].[usp_SendMessage]
    @endpointCode VARCHAR(255) = NULL,
    @endpointGroup VARCHAR(255) = NULL,
    @xmlPayload XML=NULL,
    @binaryPayload VARBINARY(MAX)=NULL,
    @varcharPayload VARCHAR(MAX)=NULL,
    @identifier VARCHAR(50) = @@SPID,
    @identifierType VARCHAR(50) = '@@SPID'
    AS BEGIN
    SET NOCOUNT ON
    DECLARE @fromService SYSNAME,
    @toService SYSNAME,
    @onContract SYSNAME,
    @messageType SYSNAME,
    @conversationTimeout INT
    SELECT @fromService = FromService
    , @toService = ToService
    , @onContract = OnContract
    , @messageType = MessageType
    , @conversationTimeout = ConversationTimeout
    FROM dbo.ServiceBrokerEndpointConfig
    WHERE GroupCode = @endpointGroup
    IF @fromService IS NULL OR @toService IS NULL OR @onContract IS NULL OR @messageType IS NULL OR @conversationTimeout IS NULL
    BEGIN
    RAISERROR (
    N'Failed to get endpoint config for GroupCode ''%s''.'
    , 16, 1, @endpointGroup) WITH LOG;
    RETURN;
    END
    DECLARE @SBDialog UNIQUEIDENTIFIER
    DECLARE @Message XML
    DECLARE @counter INT
    DECLARE @error INT
    DECLARE @handle UNIQUEIDENTIFIER;
    DECLARE @NotNullCount INT = ((CASE WHEN @xmlPayload IS NULL THEN 0 ELSE 1 END)
    + (CASE WHEN @binaryPayload IS NULL THEN 0 ELSE 1 END)
    + (CASE WHEN @varcharPayload IS NULL THEN 0 ELSE 1 END))
    IF @NotNullCount > 1
    BEGIN
    RAISERROR (
    N'Failed to SEND because %i payload fields are filled in when no more than 1 is expected'
    , 16, 1, @NotNullCount) WITH LOG;
    RETURN;
    END
    SET @counter = 1
    WHILE (1=1)
    BEGIN
    SET @handle = NULL
    -- Seek an eligible conversation in [ServiceBrokerConversations]
    -- We will hold an UPDLOCK on the composite primary key
    SELECT @handle = Handle
    FROM [ServiceBrokerConversations] WITH (UPDLOCK)
    WHERE Identifier = @identifier
    AND IdentifierType = @identifierType
    AND FromService = @fromService
    AND ToService = @toService
    AND OnContract = @onContract;
    IF @handle IS NULL
    BEGIN
    -- Need to start a new conversation for the current @Id
    BEGIN DIALOG CONVERSATION @handle
    FROM SERVICE @fromService
    TO SERVICE @toService
    ON CONTRACT @onContract
    WITH ENCRYPTION = OFF;
    -- Then the sender must listen on the
    -- send queue for the http://schemas.microsoft.com/SQL/ServiceBroker/DialogTimer message type and
    -- cleanup appropriately.
    IF @conversationTimeout IS NOT NULL
    BEGIN
    BEGIN CONVERSATION TIMER (@handle) TIMEOUT = @conversationTimeout;
    END
    INSERT INTO [ServiceBrokerConversations]
    (Identifier, IdentifierType, FromService, ToService, OnContract, Handle)
    VALUES
    (@identifier, @identifierType, @fromService, @toService, @onContract, @handle);
    END;
    IF @xmlPayload IS NOT NULL
    BEGIN
    -- Attempt to SEND on the associated conversation
    ;SEND ON CONVERSATION @handle
    MESSAGE TYPE @messageType (@xmlPayload);
    END ELSE IF @binaryPayload IS NOT NULL
    BEGIN
    ;SEND ON CONVERSATION @handle
    MESSAGE TYPE @messageType (@binaryPayload);
    END ELSE BEGIN
    ;SEND ON CONVERSATION @handle
    MESSAGE TYPE @messageType (@varcharPayload);
    END
    SELECT @error = @@ERROR;
    IF @error = 0
    BEGIN
    -- Successful send, just exit the loop
    BREAK;
    END
    SELECT @counter = @counter+1;
    IF @counter > 10
    BEGIN
    -- We failed 10 times in a row, something must be broken
    RAISERROR (
    N'Failed to SEND on a conversation for more than 10 times. Error %i.'
    , 16, 1, @error) WITH LOG;
    BREAK;
    END
    -- Delete the associated conversation from the table and try again
    DELETE FROM [ServiceBrokerConversations]
    WHERE Handle = @handle;
    SET @handle = NULL;
    END
    END
    And for the activation on the initiator queue I have:
    CREATE PROCEDURE [dbo].[usp_InitiatorQueueHandler]
    AS
    BEGIN
    SET NOCOUNT ON
    DECLARE @handle UNIQUEIDENTIFIER;
    DECLARE @messageTypeName SYSNAME;
    DECLARE @messageBody VARBINARY(MAX);
    WHILE (1=1)
    BEGIN
    BEGIN TRAN;
    ;WAITFOR (RECEIVE TOP(1)
    @handle = conversation_handle,
    @messageTypeName = message_type_name,
    @messageBody = message_body
    FROM [InitiatorQueue]), TIMEOUT 5000;
    IF (@@ROWCOUNT = 0)
    BEGIN
    COMMIT TRAN;
    BREAK;
    END
    -- Call the base stored procedure to handle ending the conversation
    EXEC dbo.usp_BrokerHandleInitiator @handle, @messageTypeName, @messageBody
    COMMIT TRAN;
    END
    END
    GO
    ALTER QUEUE [InitiatorQueue]
    WITH ACTIVATION (
    STATUS=ON,
    PROCEDURE_NAME=dbo.usp_InitiatorQueueHandler,
    EXECUTE AS OWNER,
    MAX_QUEUE_READERS=10
    )
    GO
    CREATE PROCEDURE [dbo].[usp_BrokerHandleInitiator]
    @handle UNIQUEIDENTIFIER,
    @messageTypeName SYSNAME,
    @messageBody VARBINARY(MAX)
    AS
    BEGIN
    SET NOCOUNT ON
    IF @handle IS NOT NULL
    BEGIN
    -- Delete the message from the [ServiceBrokerConversations] table
    -- before sending the [EndOfStream] message. The order is
    -- important to avoid deadlocks.
    IF @messageTypeName = N'http://schemas.microsoft.com/SQL/ServiceBroker/DialogTimer'
    OR @messageTypeName = N'http://schemas.microsoft.com/SQL/ServiceBroker/EndDialog'
    BEGIN
    DELETE FROM [ServiceBrokerConversations]
    WHERE [Handle] = @handle;
    END
    IF @messageTypeName = N'http://schemas.microsoft.com/SQL/ServiceBroker/DialogTimer'
    BEGIN
    ;SEND ON CONVERSATION @handle
    MESSAGE TYPE [EndOfStream];
    END
    ELSE IF @messageTypeName = N'http://schemas.microsoft.com/SQL/ServiceBroker/EndDialog'
    BEGIN
    END CONVERSATION @handle;
    END
    ELSE IF @messageTypeName = N'http://schemas.microsoft.com/SQL/ServiceBroker/Error'
    BEGIN
    END CONVERSATION @handle;
    -- We could send a notification or store the error in a table for further inspection
    DECLARE @error INT;
    DECLARE @description NVARCHAR(4000);
    WITH XMLNAMESPACES (N'http://schemas.microsoft.com/SQL/ServiceBroker/Error' AS ssb)
    SELECT
    @error = CAST(@messageBody AS XML).value(
    '(//ssb:Error/ssb:Code)[1]', 'INT'),
    @description = CAST(@messageBody AS XML).value(
    '(//ssb:Error/ssb:Description)[1]', 'NVARCHAR(4000)')
    -- Maybe log to audit log instead?
    RAISERROR(N'Received error Code:%i Description:"%s"',
    16, 1, @error, @description) WITH LOG;
    END;
    END
    END
    The deadlock XML is:
    <deadlock>
    <victim-list>
    <victimProcess id="process807dbd0c8" />
    </victim-list>
    <process-list>
    <process id="process807dbd0c8" taskpriority="0" logused="0" waitresource="METADATA: database_id = 21 CONVERSATION_GROUP($hash = 0xff26c7e1:0x478840de:0xd403bb)" waittime="2600" ownerId="8333217736" transactionname="GetDialogByHandle" lasttranstarted="2015-03-23T10:53:58.683" XDES="0x87f251c90" lockMode="X" schedulerid="2" kpid="7220" status="suspended" spid="110" sbid="0" ecid="0" priority="0" trancount="2" lastbatchstarted="2015-03-23T10:53:58.683" lastbatchcompleted="2015-03-23T10:53:58.683" lastattention="1900-01-01T00:00:00.683" clientapp=".Net SqlClient Data Provider" hostname="COLFOQA2" hostpid="1436" loginname="dev" isolationlevel="read committed (2)" xactid="8333217704" currentdb="21" lockTimeout="4294967295" clientoption1="673185824" clientoption2="128056">
    <executionStack>
    <frame procname="MYDB.dbo.usp_SendMessage" line="116" stmtstart="7540" stmtend="7696" sqlhandle="0x03001500aada77428391a0005da4000001000000000000000000000000000000000000000000000000000000">
    SEND ON CONVERSATION @handle
    MESSAGE TYPE @messageType (@xmlPayload); </frame>
    </executionStack>
    <inputbuf>
    Proc [Database Id = 21 Object Id = 1115151018] </inputbuf>
    </process>
    <process id="process869a5e558" taskpriority="0" logused="588" waitresource="KEY: 21:72057594039959552 (1f1ae6770d1b)" waittime="2600" ownerId="8333217730" transactionname="user_transaction" lasttranstarted="2015-03-23T10:53:58.683" XDES="0x3e28456a8" lockMode="U" schedulerid="4" kpid="6720" status="background" spid="22" sbid="0" ecid="0" priority="0" trancount="2">
    <executionStack>
    <frame procname="MYDB.dbo.usp_BrokerHandleInitiator" line="28" stmtstart="1996" stmtend="2144" sqlhandle="0x03001500f704cd06e691a0005da4000001000000000000000000000000000000000000000000000000000000">
    DELETE FROM [ServiceBrokerConversations]
    WHERE [Handle] = @handle; </frame>
    <frame procname="MYDB.dbo.usp_InitiatorQueueHandler" line="29" stmtstart="1014" stmtend="1172" sqlhandle="0x03001500316f56101694a0005da4000001000000000000000000000000000000000000000000000000000000">
    EXEC dbo.usp_BrokerHandleInitiator @handle, @messageTypeName, @messageBody </frame>
    </executionStack>
    <inputbuf>
    </inputbuf>
    </process>
    </process-list>
    <resource-list>
    <metadatalock subresource="CONVERSATION_GROUP" classid="$hash = 0xff26c7e1:0x478840de:0xd403bb" dbid="21" id="lock54fdb1800" mode="X">
    <owner-list>
    <owner id="process869a5e558" mode="X" />
    </owner-list>
    <waiter-list>
    <waiter id="process807dbd0c8" mode="X" requestType="wait" />
    </waiter-list>
    </metadatalock>
    <keylock hobtid="72057594039959552" dbid="21" objectname="MYDB.dbo.ServiceBrokerConversations" indexname="PK__ServiceB__877FDFD18DF079BD" id="lock6c65b1a00" mode="U" associatedObjectId="72057594039959552">
    <owner-list>
    <owner id="process807dbd0c8" mode="U" />
    </owner-list>
    <waiter-list>
    <waiter id="process869a5e558" mode="U" requestType="wait" />
    </waiter-list>
    </keylock>
    </resource-list>
    </deadlock>
    I have a clustered index on the fields I am SELECTing by and a UNIQUE index on the Handle (for the DELETE). When running the SELECT/DELETE statements against the table the query plan reports index seeks are being used:
    CREATE TABLE [dbo].[ServiceBrokerConversations] (
    [Identifier] VARCHAR (50) NOT NULL,
    [IdentifierType] VARCHAR (50) NOT NULL,
    [FromService] [sysname] NOT NULL,
    [ToService] [sysname] NOT NULL,
    [OnContract] [sysname] NOT NULL,
    [Handle] UNIQUEIDENTIFIER NOT NULL,
    [CreateDate] DATETIME2 (7) NULL,
    PRIMARY KEY CLUSTERED ([Identifier] ASC, [IdentifierType] ASC, [FromService] ASC, [ToService] ASC, [OnContract] ASC) ON [PRIMARY],
    UNIQUE NONCLUSTERED ([Handle] ASC) ON [PRIMARY]
    ) ON [PRIMARY];
    What appears to be happening is the DELETE is somehow deadlocking with the SEND but I am not sure how since I am using them in the same order in both the send procedure and the activated procedure. Also, RCSI is enabled on the database I am receiving the
    deadlocks on.
    EDIT:
    I think I have found the culprit with lock acquisition order:
    - In the usp_SendMessage proc:
    The SELECT locks the conversation record
    The SEND locks the conversation group
    - In the timer activated proc on the initiator queue:
    The RECEIVE locks the conversation group
    The DELETE locks the conversation record
    Given that I think there may be a few solutions:
    There is some subtle difference between my code and the code from the article that I am not noticing that when fixed will resolve the deadlocking. I am hoping this is the case since it seems that others have used this pattern without issues as far as I
    know.
    Or...The deadlocking is inherent to the pattern the code is using and I can either:
    Deal with the deadlocking by adjusting the deadlock priority on the activated stored procedure so that it becomes the victim, and I can implement retry logic.
    Remove conversation timers and activation all together and resort to some sort of job that expires the conversation by polling it, where I can control the ordering.
    My ultimate goal is to eliminate any deadlocking on usp_SendMessage so that it "never" fails.
    I appreciate any feedback!
    Thanks

    I can understand why the deadlock happens. As you point out, the activation procedure and the send SP acquire locks on the resources in reverse order.
    Why Remus does not consider this in his blog post, I really don't know. But may I ask, since you have replaced @@spid as a key with two other columns, does this also mean that multiple processes can use the same conversation? I'm not so sure that
    this is a good idea. I worked with an SB implementation which reuses conversations some months ago, and I recall that I considered changing the pattern, but decided against it in the end, although I don't remember the exact details.
    But so much is clear: if multiple processes can use the same handle, they will serialise on the SELECT with UPDLOCK. That will not happen if you change to REPEATABLEREAD, but I guess they will serialise on the SEND instead.
    The best way to address the problem appears to be to use SET LOCK_TIMEOUT 0 in the activation procedure, trap the timeout error in a CATCH block, and let the message go back to the queue. This should be better than SET DEADLOCK_PRIORITY, since then there
    will never be a deadlock that holds up the sender.
    Erland Sommarskog, SQL Server MVP, [email protected]

  • Oracle deadlock - how to use "synchronised" keyword in a transaction?

    Hi,
    I use WL6.1 SP4, Oracle 8.1.6, with some Java objects which execute a lot
    of SQL queries (mixed update, insert and select) using plain JDBC calls,
    and Weblogic connection pools. These objects are called by servlets.
    I recently experienced deadlocks when two users call the object at the
    same time (see error below).
    I execute the queries using the "synchronized" keyword in the following way:
    synchronized (this) {
        conConnection.setAutoCommit(false);
        executeTransaction(myStatement);
        conConnection.commit();
    }
    executeTransaction is overridden in sub-classes and is the method which
    executes all the queries.
    It calls methods in other objects. These methods are not declared as
    synchronized.
    1) Should they be?
    2) Should I use the keyword "synchronized" in another way?
    3) This part of code is also called when I do only "select" statements.
    I guess it should only be synchronized when we do "update" and "insert",
    which could lead to a deadlock?
    4) Do you have any idea why this deadlock occurs, as I use the
    "synchronized" keyword, and one thread should wait until the other one has finished?
    Thanks for any idea,
    Stéphanie
    ----------------- error:
    <ExecuteThread: '4' for queue: 'default'> <> <> <000000> <SQL request
    sent to database: UPDATE PARTICIPANT par SET par.PARTICIPANTLASTRANK =
    4 WHERE par.IDPARTICIPANT = 8983566>
    <ExecuteThread: '11' for queue: 'default'> <> <> <000000> <SQL request
    sent to database: UPDATE PARTICIPANT par SET par.PARTICIPANTLASTRANK =
    6 WHERE par.IDPARTICIPANT = 8983570>
    ORA-00060: deadlock detected while waiting for resource
         at oracle.jdbc.dbaccess.DBError.throwSqlException(DBError.java:134)
         at oracle.jdbc.ttc7.TTIoer.processError(TTIoer.java:289)
         at oracle.jdbc.ttc7.Oall7.receive(Oall7.java:573)
         at oracle.jdbc.ttc7.TTC7Protocol.doOall7(TTC7Protocol.java:1891)
         at oracle.jdbc.ttc7.TTC7Protocol.parseExecuteFetch(TTC7Protocol.java:1093)
         at oracle.jdbc.driver.OracleStatement.executeNonQuery(OracleStatement.java:2047)
         at oracle.jdbc.driver.OracleStatement.doExecuteOther(OracleStatement.java:1940)
         at oracle.jdbc.driver.OracleStatement.doExecuteWithTimeout(OracleStatement.java:2709)
         at oracle.jdbc.driver.OracleStatement.executeUpdate(OracleStatement.java:796)
         at weblogic.jdbc.pool.Statement.executeUpdate(Statement.java:872)
         at weblogic.jdbc.rmi.internal.StatementImpl.executeUpdate(StatementImpl.java:89)
         at weblogic.jdbc.rmi.SerialStatement.executeUpdate(SerialStatement.java:100)
         at bfinance.framework.EDBBLBean.executeSQL(EDBBLBean.java:299)

    Hi Stepanie,
    I'd try to group update statement together. Usually it helps.
    Regards,
    Slava Imeshev
    "Stephanie" <[email protected]> wrote in message
    news:[email protected]...
    Thanks for your answer.
    In the case you describe, is there a way to ensure that tx-2 waits for tx-1
    to be finished before beginning?
    My transaction which causes the problem is the following (simplified):
    UPDATE tableA SET islast=0 WHERE externalid=myid;
    for (int i=0; i< aVector.size(); i++) {
    INSERT INTO tableA (id, islast, ranking, externalid) (SELECT SEQ_tableA.nextval, 1, 0, myid);
    UPDATE tableA SET ranking = /* calculated ranking */ WHERE externalid=myid AND islast=1;
    UPDATE tableB ....
    commit;
    tx-1 and tx-2 execute this transaction at the same time. tx-1 begins
    The deadlock appears when tx-2 executes the second UPDATE tableA query.
    I don't see how I can avoid executing these two update queries, so if
    I can find another way to prevent deadlock, it would be great!
    Stéphanie
    Joseph Weinstein <[email protected]_this> wrote in message
    news:<[email protected]_this>...
    Stephanie wrote:
    Hi,
    I use WL6.1 SP4, Oracle 8.1.6, with some Java objects which execute a
    lot
    of SQL queries (mixed update, insert and select) using plain JDBC
    calls,
    and Weblogic connection pools. These objects are called by servlets.
    I experienced recently deadlocks when two users call the object at the
    same
    time (See error below).
    Hi. The error you are getting isn't necessarily from a lack of synchronization
    of your java objects. It has to do with the order in which you access DBMS
    data. You are getting ordinary DBMS deadlocks, which are caused when
    two DBMS connections each have a lock the other wants in order to proceed.
    The DBMS will quickly discover this and will kill one transaction in order to
    let the other one proceed:
    time 0: tx-1 and tx-2 have started.....
    time 1: tx-1: update tableA set val = 1 where key = 'A'
    time 2: tx-2: update tableB set val = 2 where key = 'B'
    time 3: tx-1: update tableB set val = 1 where key = 'B' (waits because tx-2 has the row locked)
    time 4: tx-2: update tableA set val = 2 where key = 'A' (waits because tx-1 has the row locked)
    This is a deadlock. The solution is to organize your application code so that every
    transaction accesses the data in the same order, e.g.: update tableA first, then update tableB.
    This will prevent deadlocks.
    Joe Weinstein at BEA

  • Reason for Deadlock Insert Select on same table

    I have obtained the deadlock graph. However, I still don't understand why a deadlock occurs. Can someone
    explain it to me? Thanks in advance.
    deadlock-list
    deadlock victim=process3a59438
     process-list
      process id=process3a58c58 taskpriority=0 logused=1093420 waitresource=PAGE: 60:1:1113 waittime=203 ownerId=245203560 transactionname=implicit_transaction lasttranstarted=2014-05-06T09:46:41.930 XDES=0xbe3bd8370 lockMode=IX
    schedulerid=9 kpid=11368 status=suspended spid=223 sbid=0 ecid=0 priority=0 transcount=2 lastbatchstarted=2014-05-06T09:46:55.933 lastbatchcompleted=2014-05-06T09:46:55.933 clientapp=jTDS hostname=CINAM1103 hostpid=123 loginname=clienta isolationlevel=read
    committed (2) xactid=245203560 currentdb=60 lockTimeout=4294967295 clientoption1=671088672 clientoption2=128058
       executionStack
        frame procname=adhoc line=1 stmtstart=320 sqlhandle=0x0200000013d63b16b7180b66ed9196aa2502a611a28bac73
    insert into [TableA] (version_id, tuple_signature, start_time_member_id, end_time_member_id, member_list, type_cd, delta, ordinal, dollar_value, delta_id) values ( @P0 ,  @P1 ,  @P2 ,  @P3 ,  @P4 ,  @P5
    ,  @P6 ,  @P7 ,  @P8 ,  @P9 )     
       inputbuf
    (@P0 nvarchar(4000),@P1 nvarchar(4000),@P2 nvarchar(4000),@P3 nvarchar(4000),@P4 nvarchar(4000),@P5 int,@P6 float,@P7 int,@P8 nvarchar(4000),@P9 nvarchar(4000))insert into [TableA] (version_id, tuple_signature, start_time_member_id,
    end_time_member_id, member_list, type_cd, delta, ordinal, dollar_value, delta_id) values ( @P0 ,  @P1 ,  @P2 ,  @P3 ,  @P4 ,  @P5 ,  @P6 ,  @P7 ,  @P8 ,  @P9 )    
      process id=process3a59438 taskpriority=0 logused=0 waitresource=PAGE: 60:1:11867 waittime=703 ownerId=245205763 transactionname=SELECT lasttranstarted=2014-05-06T09:46:55.407 XDES=0x45b5132b0 lockMode=S schedulerid=9
    kpid=10300 status=suspended spid=243 sbid=0 ecid=2 priority=0 transcount=0 lastbatchstarted=2014-05-06T09:46:55.407 lastbatchcompleted=2014-05-06T09:46:54.783 clientapp=jTDS hostname=CINAM1103 hostpid=123 isolationlevel=read committed (2) xactid=245205763
    currentdb=60 lockTimeout=4294967295 clientoption1=671088672 clientoption2=128056
       executionStack
        frame procname=adhoc line=1 stmtstart=40 sqlhandle=0x020000002811a70d11559b907ff33e99750ba56c92d1db68
    select deltadefin0_.delta_id as delta1_38_, deltadefin0_.version_id as plan2_38_, deltadefin0_.tuple_signature as tuple3_38_, deltadefin0_.start_time_member_id as start4_38_, deltadefin0_.end_time_member_id as end5_38_,
    deltadefin0_.member_list as member6_38_, deltadefin0_.type_cd as type7_38_, deltadefin0_.delta as delta38_, deltadefin0_.ordinal as ordinal38_, deltadefin0_.dollar_value as dollar10_38_ from [TableA] deltadefin0_ where deltadefin0_.version_id= @P0     
       inputbuf
     resource-list
      pagelock fileid=1 pageid=1113 dbid=60 objectname=aa_core_clienta_totalga_q1_i01_p.dbo.TableA id=lock8ad856080 mode=S associatedObjectId=72057594040680448
       owner-list
        owner id=process3a59438 mode=S
       waiter-list
        waiter id=process3a58c58 mode=IX requestType=wait
      pagelock fileid=1 pageid=11867 dbid=60 objectname=aa_core_clienta_totalga_q1_i01_p.dbo.TableA id=lock8b3af0a80 mode=IX associatedObjectId=72057594040680448
       owner-list
        owner id=process3a58c58 mode=IX
       waiter-list
        waiter id=process3a59438 mode=S requestType=wait

    Process 3a58c58 was running an insert and had an intent exclusive lock on page 11867 but also needed one on 1113; unfortunately process 3a59438 had a shared lock on that same page and wanted to push a shared lock on page 11867. Each held the resource the
    other wanted, which is a classic "deadlock" situation. Since the way victims are determined in SQL Server is based on transaction log usage, the select (process 3a59438) was the victim and killed. Shared locks and Intent Exclusive locks are not compatible.
    It's interesting to note that the insert process had 2 open transactions. If this is a problem I'd have the software company that makes it look into their processes and make the transactions shorter or opt for some type of optimistic concurrency instead of
    pessimistic.
    The trancount often shows two when there's only one transaction explicitly opened on the connection (grouping the two inserts), the second is simply the execution of the current statement.
    But for the rest, there are two questions any developer has to ask.  First, why is a select in the default read-committed isolation taking locks at all and then second, why doesn't it take whatever locks it wants atomically, because apparently it took
    one, was interrupted, then tried to take the second, thus getting lost in the deadlock.
    And then third - what can they *do* about it?  Breaking the transaction in the first process does not seem relevant.
    Josh
    ps - the answer, "well try to make everything go faster with the right index for the select etc" is something, but is it enough?

  • BDB dumps core after adding approx 19MB of data

    Hi,
    BDB core dumps after adding about 19MB of data & killing and restarting it several times.
    Stack trace :
    #0 0xc00000000033cad0:0 in kill+0x30 () from /usr/lib/hpux64/libc.so.1
    (gdb) bt
    #0 0xc00000000033cad0:0 in kill+0x30 () from /usr/lib/hpux64/libc.so.1
    #1 0xc000000000260cf0:0 in raise+0x30 () from /usr/lib/hpux64/libc.so.1
    #2 0xc0000000002fe710:0 in abort+0x190 () from /usr/lib/hpux64/libc.so.1
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile db_err.o.
    If NOT specified will behave as a non -g compiled binary.
    warning: No unwind information found.
    Skipping this library /integhome/jobin/B063_runEnv/add-ons/lib/libicudata.sl.34.
    #3 0xc000000022ec2340:0 in __db_assert+0xc0 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile db_meta.o.
    If NOT specified will behave as a non -g compiled binary.
    #4 0xc000000022ed2870:0 in __db_new+0x780 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile bt_split.o.
    If NOT specified will behave as a non -g compiled binary.
    #5 0xc000000022ded690:0 in __bam_root+0xb0 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    #6 0xc000000022ded2d0:0 in __bam_split+0x1e0 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile bt_cursor.o.
    If NOT specified will behave as a non -g compiled binary.
    #7 0xc000000022dc83f0:0 in __bam_c_put+0x360 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile db_cam.o.
    If NOT specified will behave as a non -g compiled binary.
    #8 0xc000000022eb8c10:0 in __db_c_put+0x740 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile db_am.o.
    If NOT specified will behave as a non -g compiled binary.
    #9 0xc000000022ea4100:0 in __db_put+0x4c0 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so---Type <return> to continue, or q <return> to quit---
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile db_iface.o.
    If NOT specified will behave as a non -g compiled binary.
    #10 0xc000000022eca7a0:0 in __db_put_pp+0x240 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    warning:
    ERROR: Use the "objectdir" command to specify the search
    path for objectfile cxx_db.o.
    If NOT specified will behave as a non -g compiled binary.
    #11 0xc000000022d92c90:0 in Db::put(DbTxn*,Dbt*,Dbt*,unsigned int)+0x120 ()
    from /integhome/jobin/B063_runEnv/service/sys/servicerun/bin/libdb_cxx-4.3.so
    What is the behaviour of BDB if it's killed & restarted while a BDB transaction is in progress?
    Does anybody have an idea as to why BDB dumps core in the above scenario?
    Regards
    Sandhya

    Hi Bogdan,
    As suggested by you, I am using the below flags to open an environment.
    DB_RECOVER |DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN|DB_THREAD
    DB_INIT_LOCK is not used because at our application level we are maintaining a lock to guard against multiple simultaneous access.
    The following message is output on the console & then it dumps core with the same stack trace as posted before.
    __db_assert: "last == pgno" failed: file "../dist/../db/db_meta.c", line 163
    I ran the db_verify, db_stat, db_recover tools on the DB & their results are as below.
    db_verify <dbfile>
    db_verify: Page 4965: partially zeroed page
    db_verify: ./configserviceDB: DB_VERIFY_BAD: Database verification failed
    db_recover -v
    Finding last valid log LSN: file: 1 offset 42872
    Recovery starting from [1][42200]
    Recovery complete at Sat Jul 28 17:40:36 2007
    Maximum transaction ID 8000000b Recovery checkpoint [1][42964]
    db_stat -d <dbfile>
    53162 Btree magic number
    9 Btree version number
    Big-endian Byte order
    Flags
    2 Minimum keys per-page
    8192 Underlying database page size
    1 Number of levels in the tree
    60 Number of unique keys in the tree
    60 Number of data items in the tree
    0 Number of tree internal pages
    0 Number of bytes free in tree internal pages (0% ff)
    1 Number of tree leaf pages
    62 Number of bytes free in tree leaf pages (99% ff)
    0 Number of tree duplicate pages
    0 Number of bytes free in tree duplicate pages (0% ff)
    0 Number of tree overflow pages
    0 Number of bytes free in tree overflow pages (0% ff)
    0 Number of empty pages
    0 Number of pages on the free list
    db_stat -E <dbfile>
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Default database environment information:
    4.3.28 Environment version
    0x120897 Magic number
    0 Panic value
    2 References
    0 The number of region locks that required waiting (0%)
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Per region database environment information:
    Mpool Region:
    2 Region ID
    -1 Segment ID
    1MB 264KB Size
    0 The number of region locks that required waiting (0%)
    Log Region:
    3 Region ID
    -1 Segment ID
    1MB 64KB Size
    0 The number of region locks that required waiting (0%)
    Transaction Region:
    4 Region ID
    -1 Segment ID
    16KB Size
    0 The number of region locks that required waiting (0%)
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    DB_ENV handle information:
    Set Errfile
    db_stat Errpfx
    !Set Errcall
    !Set Feedback
    !Set Panic
    !Set Malloc
    !Set Realloc
    !Set Free
    Verbose flags
    !Set App private
    !Set App dispatch
    !Set Home
    !Set Log dir
    /integhome/jobin/B064_July2/runEnv/temp Tmp dir
    !Set Data dir
    0660 Mode
    DB_INIT_LOG, DB_INIT_MPOOL, DB_INIT_TXN, DB_USE_ENVIRON Open flags
    !Set Lockfhp
    Set Rec tab
    187 Rec tab slots
    !Set RPC client
    0 RPC client ID
    0 DB ref count
    -1 Shared mem key
    400 test-and-set spin configuration
    !Set DB handle mutex
    !Set api1 internal
    !Set api2 internal
    !Set password
    !Set crypto handle
    !Set MT mutex
    DB_ENV_LOG_AUTOREMOVE, DB_ENV_OPEN_CALLED Flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Default logging region information:
    0x40988 Log magic number
    10 Log version number
    1MB Log record cache size
    0660 Log file mode
    1Mb Current log file size
    632B Log bytes written
    632B Log bytes written since last checkpoint
    1 Total log file writes
    0 Total log file write due to overflow
    1 Total log file flushes
    1 Current log file number
    42872 Current log file offset
    1 On-disk log file number
    42872 On-disk log file offset
    1 Maximum commits in a log flush
    1 Minimum commits in a log flush
    1MB 64KB Log region size
    0 The number of region locks that required waiting (0%)
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Log REGINFO information:
    Log Region type
    3 Region ID
    __db.003 Region name
    0xc00000000b774000 Original region address
    0xc00000000b774000 Region address
    0xc00000000b883dd0 Region primary address
    0 Region maximum allocation
    0 Region allocated
    REGION_JOIN_OK Region flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    DB_LOG handle information:
    !Set DB_LOG handle mutex
    0 Log file name
    !Set Log file handle
    Flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    LOG handle information:
    0 file name list mutex (0%)
    0x40988 persist.magic
    10 persist.version
    0 persist.log_size
    0660 persist.mode
    1/42872 current file offset LSN
    1/42872 first buffer byte LSN
    0 current buffer offset
    42872 current file write offset
    68 length of last record
    0 log flush in progress
    0 Log flush mutex (0%)
    1/42872 last sync LSN
    1/41475 cached checkpoint LSN
    1MB log buffer size
    1MB log file size
    1MB next log file size
    0 transactions waiting to commit
    1/0 LSN of first commit
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    LOG FNAME list:
    0 File name mutex (0%)
    1 Fid max
    ID Name Type Pgno Txnid DBP-info
    0 configserviceDB btree 0 0 No DBP 0 0 0
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Default cache region information:
    1MB 262KB 960B Total cache size
    1 Number of caches
    1MB 264KB Pool individual cache size
    0 Maximum memory-mapped file size
    0 Maximum open file descriptors
    0 Maximum sequential buffer writes
    0 Sleep after writing maximum sequential buffers
    0 Requested pages mapped into the process' address space
    43312 Requested pages found in the cache (89%)
    4968 Requested pages not found in the cache
    640 Pages created in the cache
    4965 Pages read into the cache
    621 Pages written from the cache to the backing file
    4818 Clean pages forced from the cache
    621 Dirty pages forced from the cache
    0 Dirty pages written by trickle-sync thread
    166 Current total page count
    146 Current clean page count
    20 Current dirty page count
    131 Number of hash buckets used for page location
    53888 Total number of times hash chains searched for a page
    4 The longest hash chain searched for a page
    92783 Total number of hash buckets examined for page location
    0 The number of hash bucket locks that required waiting (0%)
    0 The maximum number of times any hash bucket lock was waited for
    0 The number of region locks that required waiting (0%)
    5615 The number of page allocations
    10931 The number of hash buckets examined during allocations
    22 The maximum number of hash buckets examined for an allocation
    5439 The number of pages examined during allocations
    11 The max number of pages examined for an allocation
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Pool File: temporary
    1024 Page size
    0 Requested pages mapped into the process' address space
    43245 Requested pages found in the cache (99%)
    1 Requested pages not found in the cache
    635 Pages created in the cache
    0 Pages read into the cache
    617 Pages written from the cache to the backing file
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Pool File: configserviceDB
    8192 Page size
    0 Requested pages mapped into the process' address space
    65 Requested pages found in the cache (1%)
    4965 Requested pages not found in the cache
    1 Pages created in the cache
    4965 Pages read into the cache
    0 Pages written from the cache to the backing file
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Mpool REGINFO information:
    Mpool Region type
    2 Region ID
    __db.002 Region name
    0xc00000000b632000 Original region address
    0xc00000000b632000 Region address
    0xc00000000b773f08 Region primary address
    0 Region maximum allocation
    0 Region allocated
    REGION_JOIN_OK Region flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    MPOOL structure:
    0/0 Maximum checkpoint LSN
    131 Hash table entries
    64 Hash table last-checked
    48905 Hash table LRU count
    48914 Put counter
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    DB_MPOOL handle information:
    !Set DB_MPOOL handle mutex
    1 Underlying cache regions
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    DB_MPOOLFILE structures:
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    MPOOLFILE structures:
    File #1: temporary
    0 Mutex (0%)
    0 Reference count
    18 Block count
    634 Last page number
    0 Original last page number
    0 Maximum page number
    0 Type
    0 Priority
    0 Page's LSN offset
    32 Page's clear length
    0 0 0 0 0 0 0 8 0 0 0 0 0 0 0 f8 0 0 0 0 ID
    deadfile, file written Flags
    File #2: configserviceDB
    0 Mutex (0%)
    1 Reference count
    148 Block count
    4965 Last page number
    4964 Original last page number
    0 Maximum page number
    0 Type
    0 Priority
    0 Page's LSN offset
    32 Page's clear length
    0 0 b6 59 40 1 0 2 39 ac 13 6f 0 a df 18 0 0 0 0 ID
    file written Flags
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    Cache #1:
    BH hash table (131 hash slots)
    bucket #: priority, mutex
    pageno, file, ref, LSN, mutex, address, priority, flags
    bucket 0: 47385, 0/0%:
    4813, #2, 0, 0/1, 0/0%, 0x04acf0, 47385
    4944, #2, 0, 0/0, 0/0%, 0x020c18, 48692

  • Deadlock: select and DDL

    Hello,
    I have an Oracle application. We have select SQL statements and statements to enable constraints (for example, foreign keys and primary keys). The select SQL statements and the statements to enable constraints may be executed in parallel. Sometimes we get a deadlock. Can someone explain to me why the deadlock happened?
    Thanks,
    Edited by: slsam01 on Apr 3, 2009 3:18 PM

    Very bad things are happening here. I can't understand why this basic concept is being misunderstood.
    1. The document you pointed to is not about Oracle. It's about Berkeley DB, which I've never used. :(
    2. The truth about locking in the select query.
    - Oracle never (again, never!) acquires a table lock (TM lock) or a row-level lock (TX lock) for a select query.
    - A select ... for update statement needs a TM lock in RS mode and a TX lock in X mode. It's just like a DML statement.
    - The parse phase (not the execute phase) needs the library cache related locks for the target table. For instance, during the soft parse phase, Oracle acquires a library cache lock on the table. During the hard parse (optimization) phase, Oracle acquires the library cache pin on the table.
    It's Oracle's promise that select statements never lock the table at the macro level (TM lock, TX lock). But at the micro level, it sometimes needs to lock the table. Library cache lock and library cache pin are good examples.
    ================================
    Dion Cho - Oracle Performance Storyteller
    http://dioncho.wordpress.com (english)
    http://ukja.tistory.com (korean)
    ================================

  • DeadLocks on e_waitPipeNewRow Wait type

    Hi Experts,
    We are receiving hundreds of deadlocks daily with WaitType = "e_waitPipeNewRow". I am not sure why these deadlocks are occurring on the prod server. I suspect this is related to the SSIS packages that are running on the machine, plus some of the latest
    updates are missing on the servers. Can you guys please throw some light on this and provide a solution to get rid of it?
    Shivraj Patil.

    Hi experts,
    Is this kind of deadlock a normal deadlock or an intra-query parallelism deadlock? I ask because I see the Page Lock keyword in the graph.
    <deadlock>
    <victim-list>
    <victimProcess id="process2b41ce08" />
    </victim-list>
    <process-list>
    <process id="process2b41ce08" taskpriority="0" logused="360" waitresource="PAGE: 10:5:451520" waittime="3255" ownerId="13432730290" transactionname="UPDATE" lasttranstarted="2014-11-25T19:26:25.777" XDES="0xe8b07d970" lockMode="IU" schedulerid="23" kpid="250372" status="suspended" spid="281" sbid="0" ecid="0" priority="0" trancount="2" lastbatchstarted="2014-11-25T19:26:25.327" lastbatchcompleted="2014-11-25T19:26:25.327" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" loginname="INTRANET\sysCWBASPNET" isolationlevel="read committed (2)" xactid="13432730290" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="259" stmtstart="27128" stmtend="29648" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf>
    Proc [Database Id = 10 Object Id = 1822850502] </inputbuf>
    </process>
    <process id="process3243bb88" taskpriority="0" logused="10000" waittime="3156" schedulerid="36" kpid="281440" status="suspended" spid="277" sbid="0" ecid="2" priority="0" trancount="0" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf />
    </process>
    <process id="process32458bc8" taskpriority="0" logused="10000" waittime="3181" schedulerid="39" kpid="277808" status="suspended" spid="277" sbid="0" ecid="3" priority="0" trancount="0" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf />
    </process>
    <process id="process32431b88" taskpriority="0" logused="10000" waittime="3166" schedulerid="35" kpid="253808" status="suspended" spid="277" sbid="0" ecid="4" priority="0" trancount="0" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf />
    </process>
    <process id="process32445708" taskpriority="0" logused="10000" waittime="3203" schedulerid="37" kpid="277024" status="suspended" spid="277" sbid="0" ecid="1" priority="0" trancount="0" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf />
    </process>
    <process id="process5c4ee08" taskpriority="0" logused="10000" waittime="3136" schedulerid="18" kpid="261740" status="suspended" spid="277" sbid="0" ecid="0" priority="0" trancount="2" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" loginname="INTRANET\sysCWBASPNET" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf>
    Proc [Database Id = 10 Object Id = 1822850502] </inputbuf>
    </process>
    <process id="process32444988" taskpriority="0" logused="1138880" waitresource="PAGE: 10:12:435718" waittime="3258" ownerId="13432730596" transactionname="UPDATE" lasttranstarted="2014-11-25T19:26:25.800" XDES="0x18cb1136e0" lockMode="U" schedulerid="37" kpid="261676" status="suspended" spid="277" sbid="0" ecid="8" priority="0" trancount="0" lastbatchstarted="2014-11-25T19:26:25.287" lastbatchcompleted="2014-11-25T19:26:25.287" clientapp=".Net SqlClient Data Provider" hostname="LDNPSM032220" hostpid="2240" isolationlevel="read committed (2)" xactid="13432730596" currentdb="10" lockTimeout="4294967295" clientoption1="538970208" clientoption2="128056">
    <executionStack>
    <frame procname="" line="279" stmtstart="29650" stmtend="30242" sqlhandle="0x03000a00c67da66c2575ac0051a300000100000000000000" />
    </executionStack>
    <inputbuf />
    </process>
    </process-list>
    <resource-list>
    <pagelock fileid="5" pageid="451520" dbid="10" objectname="" id="lock178f827f80" mode="UIX" associatedObjectId="72057604489740288">
    <owner-list>
    <owner id="process5c4ee08" mode="IX" />
    <owner id="process5c4ee08" mode="U" />
    </owner-list>
    <waiter-list>
    <waiter id="process2b41ce08" mode="IU" requestType="wait" />
    </waiter-list>
    </pagelock>
    <exchangeEvent id="Pipe149719eb00" WaitType="e_waitPipeGetRow" nodeId="8">
    <owner-list>
    <owner id="process32444988" />
    </owner-list>
    <waiter-list>
    <waiter id="process3243bb88" />
    </waiter-list>
    </exchangeEvent>
    <exchangeEvent id="Pipe149719eb80" WaitType="e_waitPipeGetRow" nodeId="8">
    <owner-list>
    <owner id="process32444988" />
    </owner-list>
    <waiter-list>
    <waiter id="process32458bc8" />
    </waiter-list>
    </exchangeEvent>
    <exchangeEvent id="Pipe149719ec00" WaitType="e_waitPipeGetRow" nodeId="8">
    <owner-list>
    <owner id="process32444988" />
    </owner-list>
    <waiter-list>
    <waiter id="process32431b88" />
    </waiter-list>
    </exchangeEvent>
    <exchangeEvent id="Pipe149719ea80" WaitType="e_waitPipeGetRow" nodeId="8">
    <owner-list>
    <owner id="process32444988" />
    </owner-list>
    <waiter-list>
    <waiter id="process32445708" />
    </waiter-list>
    </exchangeEvent>
    <exchangeEvent id="Pipe149719e300" WaitType="e_waitPipeGetRow" nodeId="3">
    <owner-list>
    <owner id="process32445708" />
    <owner id="process3243bb88" />
    <owner id="process32458bc8" />
    <owner id="process32431b88" />
    </owner-list>
    <waiter-list>
    <waiter id="process5c4ee08" />
    </waiter-list>
    </exchangeEvent>
    <pagelock fileid="12" pageid="435718" dbid="10" objectname="" id="lockdfda7ef80" mode="IX" associatedObjectId="72057604489740288">
    <owner-list>
    <owner id="process2b41ce08" mode="IX" />
    </owner-list>
    <waiter-list>
    <waiter id="process32444988" mode="U" requestType="wait" />
    </waiter-list>
    </pagelock>
    </resource-list>
    </deadlock>
    Shivraj Patil.

  • I tested BDB vs SQLite3 on android phone, and result , question

    I ran a benchmark on my Android phone using the RL Benchmark application.
    I expected BDB to be better than SQLite, but the result was the opposite.
    In almost every field, BDB was worse than SQLite.
    I have looked at the BDB source a little, from a DS perspective, but I don't know why BDB is worse than SQLite.
    Could someone look at the result and explain it to me, please? Thank you.
    OS : Android 2.3.3
    phone : HTC Desire (korea version)
    Application : RL benchmark ( by red-license )
    the result is here.
    https://spreadsheets.google.com/spreadsheet/ccc?key=0AoZ0yhTRo08hdDQxa2Fod0dDV0hDeGxWa2doVjE2Y2c&hl=en_US

    Hi,
    If you search the forum you'll find numerous threads where the comparison between BDB SQL and SQLite has been discussed, and also where suggestions for tuning BDB SQL have been made.
    From the Oracle Berkeley DB SQL API overview page under the Technical Resources subheading you can access several resources related to performance comparison between BDB SQL and SQLite.
    Please review and follow the suggestions in those guides and from previous discussions on the forum and let me know if you see BDB SQL's performance improving.
    Regards,
    Andrei

  • Semaphore

    synchronized void getResource() throws InterruptedException {
        semaphore--;
        if (semaphore < 0) {
            wait(); // thread sleeps here and releases the object's monitor
        }
        // do retrieve resource steps here
    }

    synchronized void putResource() {
        // do put resource steps here
        semaphore++;
        if (semaphore <= 0) {
            notify(); // wake one waiter; the monitor is released when this method returns
        }
    }
    The semaphore implementation is listed above, and in order to use wait() and notify() we have to use a synchronized method or a "synchronized(this)" code block.
    There is one question that I still don't understand:
    If a synchronized method acquires the lock associated with the object, will a call to "putResource()" wait while a call to "getResource()" holds the lock associated with the object?
    If so, then why doesn't a deadlock occur when one thread is sleeping inside the synchronized method "getResource()" while another thread is trying to call "putResource()"?
    If not, then what is the meaning of "lock associated with the object"? What I learned before about the "lock associated with the object" is that when someone acquires the lock on an object, others who want to access that object have to wait for the lock first...

    When a thread that has an object's monitor calls wait on that object, it releases the monitor as it goes into the wait state, so it is available for another thread to pick up and call notify.
    Note that calling notify does not release the monitor, so the waiting (woken up) thread cannot execute until the notifying thread releases the monitor normally.
    Ref: JLS 17.13, 17.14
    http://java.sun.com/docs/books/jls/second_edition/html/memory.doc.html#28460

  • Hash function for the DB_HASH access method

    Hello!
    I use BDB 4.5.20 and tried to use the DB_HASH access method with my own hash function, since I have very specific keys. The keys are UUIDs; they all have the same length (16 bytes) and a specific binary representation which can be used to generate the hash code. After I set my own hash function using Db::set_h_hash(), I get a call to this function during database open with very strange data that I did not expect.
    Backtrace is:
    storaged.dll!tbricks::storage::StorageBDBBackend::bdb_hash_func(Db * db=0x01dba398, const void * key=0x1318326c, unsigned int size=12) Line 35     C++
    libdb45d.dll!_db_h_hash_intercept_c(__db * cthis=0x01dba418, const void * data=0x1318326c, unsigned int len=12) Line 483 + 0x97 bytes     C++
    libdb45d.dll!__ham_init_meta(__db * dbp=0x01dba418, hashmeta33 * meta=0x01dbb120, unsigned int pgno=0, _db_lsn * lsnp=0x0013f10c) Line 291 + 0x13 bytes     C
    libdb45d.dll!__ham_new_file(__db * dbp=0x01dba418, __db_txn * txn=0x01dbacc8, __fh_t * fhp=0x01dbafb8, const char * name=0x01dbac60) Line 402 + 0x13 bytes     C
    libdb45d.dll!__db_new_file(__db * dbp=0x01dba418, __db_txn * txn=0x01dbacc8, __fh_t * fhp=0x01dbafb8, const char * name=0x01dbac60) Line 284 + 0x15 bytes     C
    libdb45d.dll!__fop_file_setup(__db * dbp=0x01dba418, __db_txn * txn=0x01dba9e0, const char * name=0x01585624, int mode=384, unsigned int flags=129, unsigned int * retidp=0x0013f458) Line 586 + 0x42 bytes     C
    libdb45d.dll!__db_open(__db * dbp=0x01dba418, __db_txn * txn=0x01dba9e0, const char * fname=0x01585624, const char * dname=0x00000000, DBTYPE type=DB_HASH, unsigned int flags=129, int mode=0, unsigned int meta_pgno=0) Line 154 + 0x1d bytes     C
    libdb45d.dll!__db_open_pp(__db * dbp=0x01dba418, __db_txn * txn=0x01dba9e0, const char * fname=0x01585624, const char * dname=0x00000000, DBTYPE type=DB_HASH, unsigned int flags=129, int mode=0) Line 1079 + 0x23 bytes     C
    libdb45d.dll!Db::open(DbTxn * txnid=0x00000000, const char * file=0x01585624, const char * database=0x00000000, DBTYPE type=DB_HASH, unsigned int flags=33554561, int mode=0) Line 313 + 0x30 bytes     C++
    storaged.dll!tbricks::storage::BDBBackend::open_db(Db & db={...}, const char * fileName=0x01585624, DBTYPE dbType=DB_HASH) Line 372 + 0x21 bytes     C++
    The Db::set_h_hash() documentation says nothing about behaviour like this.
    Why does BDB call my hash function during database open when I do not put()/get() any data?
    Initially I implemented the hash function to abort the application if the key length is not equal to 16, because that looks like an internal application error, and sure enough the application aborted because of that.
    How should I treat this data, and do I have to return any hash code at all in this case?

    A quick look at the code indicates that this call is to run the hash function against a known value ("%$sniglet^&") to store the result in the DB file (or to compare the result against a result stored in the file, if the file already exists). Since a failure of this comparison prints "hash: incompatible hash function", I think we can assume that this is done to ensure the hash function being used is compatible with the hash function used when the DB file was created.
    So you need to be able to provide some form of hash output for this.
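
    For illustration, one way to satisfy that compatibility probe is to make the callback return a stable value for any key length rather than assuming 16 bytes. Below is a minimal sketch against the C-API callback shape used by set_h_hash (the C++ Db::set_h_hash callback takes a Db* instead of a DB*); the FNV-1a mixing is illustrative only, not the original poster's UUID scheme.

    #include <db.h>

    /*
     * Sketch only: a hash callback that handles arbitrary key lengths,
     * so the library's internal open-time probe works as well as the
     * real 16-byte UUID keys.
     */
    u_int32_t
    uuid_hash(DB *dbp, const void *bytes, u_int32_t length)
    {
        const unsigned char *p = bytes;
        u_int32_t h = 2166136261u;      /* FNV-1a offset basis */
        u_int32_t i;

        (void)dbp;                      /* unused */

        for (i = 0; i < length; i++) {
            h ^= p[i];
            h *= 16777619u;             /* FNV-1a prime */
        }
        return (h);
    }

    /* Registered before open, e.g.: dbp->set_h_hash(dbp, uuid_hash); */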

  • Can I use cp or dd to perform hot backup?

    Hi,
    I am wondering whether we can use 'cp' or 'dd' to do a hot backup of BDB. Is it safe enough?
    If so, why does BDB still need to supply the db_hotbackup utility?
    Thanks.
    -Yuan

    The behavior/implementation of 'cp' is different from system to system. In Solaris in particular it doesn't work as you might expect. db_hotbackup was written to give you a supported, reliable way to do a hot (live) backup of the database. You don't have to ship the utility, you're free to copy the code from the utility into your application itself. Would that work?
    -greg
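
    For illustration only (this is not the db_hotbackup source), a rough sketch of the copy ordering the utility relies on, using the C API's log_archive lists: data files are handled first and log files last, so catastrophic recovery on the copy can bring it to a consistent state. The copy step itself is only printed here; real copying and error handling are omitted.

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    int
    list_backup_files(DB_ENV *dbenv)
    {
        char **names, **p;
        int ret;

        /* 1. Database (data) files referenced by the log. */
        if ((ret = dbenv->log_archive(dbenv, &names, DB_ARCH_DATA)) != 0)
            return (ret);
        for (p = names; p != NULL && *p != NULL; p++)
            printf("copy data file: %s\n", *p);
        free(names);

        /* 2. Log files, listed and copied after the data files. */
        if ((ret = dbenv->log_archive(dbenv, &names, DB_ARCH_LOG)) != 0)
            return (ret);
        for (p = names; p != NULL && *p != NULL; p++)
            printf("copy log file: %s\n", *p);
        free(names);

        return (0);
    }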

  • Lock table is out of available lock entries

    Hi,
    I'm using BDB 4.8 via Berkeley DB XML. I'm adding a lot of XML documents (ca. 1000) in one transaction and get "Lock table is out of available lock entries". My lock count is set to 100000 (which is too much, but still...).
    I know that I probably should not put so many docs in the same transaction, but why does BDB throw a "not enough locks" error? Aren't 100000 locks enough? (I also tried setting 1 million for testing purposes.)
    As a side question, may I change the number of locks after environment creation (but before opening it)?
    P.S. I hope this isn't off-topic on this forum.
    Thanks in advance,
    Vyacheslav

    Hello,
    As you mention, "Lock table is out of available lock entries" indicates that there are more locks than your underlying database environment is configured for. Please take a look at the "Configuring locking: sizing the system" section of the Berkeley DB Reference Guide at:
    http://www.oracle.com/technology/documentation/berkeley-db/db/programmer_reference/lock_max.html
    From there:
    The maximum number of locks required by an application cannot be easily estimated. It is possible to calculate a maximum number of locks by multiplying the maximum number of lockers, times the maximum number of lock objects, times two (two for the two possible lock modes for each object, read and write). However, this is a pessimal value, and real applications are unlikely to actually need that many locks. Reviewing the Lock subsystem statistics is the best way to determine this value.
    What are the lock subsystem statistics showing? You can get them with db_stat -c, or programmatically with the environment's lock_stat method.
    Thanks,
    Sandra
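
    As a rough sketch of where such sizing is done with the C API underneath Berkeley DB XML: as I understand it, the lock table is sized when the environment regions are created, so the limits are set on the DB_ENV handle before DB_ENV->open() (or placed in the DB_CONFIG file in the environment home). The numbers below are placeholders, not recommendations.

    #include <db.h>

    int
    open_env_sized_for_locks(DB_ENV *dbenv, const char *home)
    {
        int ret;

        /* Lock-table limits must be configured before the open call. */
        if ((ret = dbenv->set_lk_max_locks(dbenv, 100000)) != 0 ||
            (ret = dbenv->set_lk_max_objects(dbenv, 100000)) != 0 ||
            (ret = dbenv->set_lk_max_lockers(dbenv, 10000)) != 0)
            return (ret);

        return (dbenv->open(dbenv, home,
            DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
            DB_INIT_TXN | DB_RECOVER | DB_THREAD, 0));
    }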
